file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
applicable_declarations.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Applicable declarations management.
use properties::PropertyDeclarationBlock;
use rule_tree::{CascadeLevel, StyleSource};
use servo_arc::Arc;
use shared_lock::Locked;
use smallvec::SmallVec;
use std::fmt::{self, Debug};
use std::mem;
/// List of applicable declarations. This is a transient structure that shuttles
/// declarations between selector matching and inserting into the rule tree, and
/// therefore we want to avoid heap-allocation where possible.
///
/// In measurements on wikipedia, we pretty much never have more than 8 applicable
/// declarations, so we could consider making this 8 entries instead of 16.
/// However, it may depend a lot on workload, and stack space is cheap.
pub type ApplicableDeclarationList = SmallVec<[ApplicableDeclarationBlock; 16]>;
/// Blink uses 18 bits to store source order, and does not check overflow [1].
/// That's a limit that could be reached in realistic webpages, so we use
/// 24 bits and enforce defined behavior in the overflow case.
///
/// Note that the value of 24 is also hard-coded into the level() accessor,
/// which does a byte-aligned load of the 4th byte. If you change this value
/// you'll need to change that as well.
///
/// [1] https://cs.chromium.org/chromium/src/third_party/WebKit/Source/core/css/
/// RuleSet.h?l=128&rcl=90140ab80b84d0f889abc253410f44ed54ae04f3
const SOURCE_ORDER_BITS: usize = 24;
const SOURCE_ORDER_MASK: u32 = (1 << SOURCE_ORDER_BITS) - 1;
const SOURCE_ORDER_MAX: u32 = SOURCE_ORDER_MASK;
/// Stores the source order of a block and the cascade level it belongs to.
#[derive(Clone, Copy, Eq, MallocSizeOf, PartialEq)]
struct SourceOrderAndCascadeLevel(u32);
impl SourceOrderAndCascadeLevel {
fn new(source_order: u32, cascade_level: CascadeLevel) -> SourceOrderAndCascadeLevel {
let mut bits = ::std::cmp::min(source_order, SOURCE_ORDER_MAX);
bits |= (cascade_level as u8 as u32) << SOURCE_ORDER_BITS;
SourceOrderAndCascadeLevel(bits)
}
fn order(&self) -> u32 {
self.0 & SOURCE_ORDER_MASK
}
fn level(&self) -> CascadeLevel {
unsafe {
// Transmute rather than shifting so that we're sure the compiler
// emits a simple byte-aligned load.
let as_bytes: [u8; 4] = mem::transmute(self.0);
CascadeLevel::from_byte(as_bytes[3])
}
}
}
impl Debug for SourceOrderAndCascadeLevel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SourceOrderAndCascadeLevel")
.field("order", &self.order())
.field("level", &self.level())
.finish()
}
}
/// A property declaration together with its precedence among rules of equal
/// specificity so that we can sort them.
///
/// This represents the declarations in a given declaration block for a given
/// importance.
#[derive(Clone, Debug, MallocSizeOf, PartialEq)]
pub struct ApplicableDeclarationBlock {
/// The style source, either a style rule, or a property declaration block.
#[ignore_malloc_size_of = "Arc"]
pub source: StyleSource,
/// The source order of the block, and the cascade level it belongs to.
order_and_level: SourceOrderAndCascadeLevel,
/// The specificity of the selector this block is represented by.
pub specificity: u32,
}
impl ApplicableDeclarationBlock {
/// Constructs an applicable declaration block from a given property
/// declaration block and importance.
#[inline]
pub fn from_declarations(
declarations: Arc<Locked<PropertyDeclarationBlock>>,
level: CascadeLevel,
) -> Self {
ApplicableDeclarationBlock {
source: StyleSource::Declarations(declarations),
order_and_level: SourceOrderAndCascadeLevel::new(0, level),
specificity: 0,
}
}
|
order_and_level: SourceOrderAndCascadeLevel::new(order, level),
specificity: specificity,
}
}
/// Returns the source order of the block.
#[inline]
pub fn source_order(&self) -> u32 {
self.order_and_level.order()
}
/// Returns the cascade level of the block.
#[inline]
pub fn level(&self) -> CascadeLevel {
self.order_and_level.level()
}
/// Convenience method to consume self and return the source alongside the
/// level.
#[inline]
pub fn order_and_level(self) -> (StyleSource, CascadeLevel) {
let level = self.level();
(self.source, level)
}
}
|
/// Constructs an applicable declaration block from the given components
#[inline]
pub fn new(source: StyleSource, order: u32, level: CascadeLevel, specificity: u32) -> Self {
ApplicableDeclarationBlock {
source: source,
|
random_line_split
|
asteroid.rs
|
pub struct Asteroid {
position: (f32, f32),
velocity: (f32, f32),
radius: f32,
}
impl Asteroid {
pub fn update(&mut self) {
self.position.0 += self.velocity.0;
self.position.1 += self.velocity.1;
}
pub fn pos(&self) -> (f32, f32) {
self.position.clone()
}
pub fn radius(&self) -> f32 {
self.radius
}
pub fn still_alive(&self, width: f32, height: f32) -> bool {
!(self.position.0 < -width || self.position.0 > width || self.position.1 < -height ||
|
}
pub fn new() -> Asteroid {
Asteroid {
position: (0.0, 0.0),
velocity: (0.0, 0.0),
radius: 10.0,
}
}
pub fn new_with_attr(pos: (f32, f32), vel: (f32, f32), rad: f32) -> Asteroid {
Asteroid {
position: pos,
velocity: vel,
radius: rad,
}
}
}
pub const INDICES: [u16; 1074] =
[0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5, 0, 5, 6, 0, 6, 7, 0, 7, 8,
0, 8, 9, 0, 9, 10, 0, 10, 11, 0, 11, 12, 0, 12, 13, 0, 13, 14,
0, 14, 15, 0, 15, 16, 0, 16, 17, 0, 17, 18, 0, 18, 19, 0, 19,
20, 0, 20, 21, 0, 21, 22, 0, 22, 23, 0, 23, 24, 0, 24, 25, 0,
25, 26, 0, 26, 27, 0, 27, 28, 0, 28, 29, 0, 29, 30, 0, 30, 31,
0, 31, 32, 0, 32, 33, 0, 33, 34, 0, 34, 35, 0, 35, 36, 0, 36,
37, 0, 37, 38, 0, 38, 39, 0, 39, 40, 0, 40, 41, 0, 41, 42, 0,
42, 43, 0, 43, 44, 0, 44, 45, 0, 45, 46, 0, 46, 47, 0, 47, 48,
0, 48, 49, 0, 49, 50, 0, 50, 51, 0, 51, 52, 0, 52, 53, 0, 53,
54, 0, 54, 55, 0, 55, 56, 0, 56, 57, 0, 57, 58, 0, 58, 59, 0,
59, 60, 0, 60, 61, 0, 61, 62, 0, 62, 63, 0, 63, 64, 0, 64, 65,
0, 65, 66, 0, 66, 67, 0, 67, 68, 0, 68, 69, 0, 69, 70, 0, 70,
71, 0, 71, 72, 0, 72, 73, 0, 73, 74, 0, 74, 75, 0, 75, 76, 0,
76, 77, 0, 77, 78, 0, 78, 79, 0, 79, 80, 0, 80, 81, 0, 81, 82,
0, 82, 83, 0, 83, 84, 0, 84, 85, 0, 85, 86, 0, 86, 87, 0, 87,
88, 0, 88, 89, 0, 89, 90, 0, 90, 91, 0, 91, 92, 0, 92, 93, 0,
93, 94, 0, 94, 95, 0, 95, 96, 0, 96, 97, 0, 97, 98, 0, 98, 99,
0, 99, 100, 0, 100, 101, 0, 101, 102, 0, 102, 103, 0, 103, 104,
0, 104, 105, 0, 105, 106, 0, 106, 107, 0, 107, 108, 0, 108, 109,
0, 109, 110, 0, 110, 111, 0, 111, 112, 0, 112, 113, 0, 113, 114,
0, 114, 115, 0, 115, 116, 0, 116, 117, 0, 117, 118, 0, 118, 119,
0, 119, 120, 0, 120, 121, 0, 121, 122, 0, 122, 123, 0, 123, 124,
0, 124, 125, 0, 125, 126, 0, 126, 127, 0, 127, 128, 0, 128, 129,
0, 129, 130, 0, 130, 131, 0, 131, 132, 0, 132, 133, 0, 133, 134,
0, 134, 135, 0, 135, 136, 0, 136, 137, 0, 137, 138, 0, 138, 139,
0, 139, 140, 0, 140, 141, 0, 141, 142, 0, 142, 143, 0, 143, 144,
0, 144, 145, 0, 145, 146, 0, 146, 147, 0, 147, 148, 0, 148, 149,
0, 149, 150, 0, 150, 151, 0, 151, 152, 0, 152, 153, 0, 153, 154,
0, 154, 155, 0, 155, 156, 0, 156, 157, 0, 157, 158, 0, 158, 159,
0, 159, 160, 0, 160, 161, 0, 161, 162, 0, 162, 163, 0, 163, 164,
0, 164, 165, 0, 165, 166, 0, 166, 167, 0, 167, 168, 0, 168, 169,
0, 169, 170, 0, 170, 171, 0, 171, 172, 0, 172, 173, 0, 173, 174,
0, 174, 175, 0, 175, 176, 0, 176, 177, 0, 177, 178, 0, 178, 179,
0, 179, 180, 0, 180, 181, 0, 181, 182, 0, 182, 183, 0, 183, 184,
0, 184, 185, 0, 185, 186, 0, 186, 187, 0, 187, 188, 0, 188, 189,
0, 189, 190, 0, 190, 191, 0, 191, 192, 0, 192, 193, 0, 193, 194,
0, 194, 195, 0, 195, 196, 0, 196, 197, 0, 197, 198, 0, 198, 199,
0, 199, 200, 0, 200, 201, 0, 201, 202, 0, 202, 203, 0, 203, 204,
0, 204, 205, 0, 205, 206, 0, 206, 207, 0, 207, 208, 0, 208, 209,
0, 209, 210, 0, 210, 211, 0, 211, 212, 0, 212, 213, 0, 213, 214,
0, 214, 215, 0, 215, 216, 0, 216, 217, 0, 217, 218, 0, 218, 219,
0, 219, 220, 0, 220, 221, 0, 221, 222, 0, 222, 223, 0, 223, 224,
0, 224, 225, 0, 225, 226, 0, 226, 227, 0, 227, 228, 0, 228, 229,
0, 229, 230, 0, 230, 231, 0, 231, 232, 0, 232, 233, 0, 233, 234,
0, 234, 235, 0, 235, 236, 0, 236, 237, 0, 237, 238, 0, 238, 239,
0, 239, 240, 0, 240, 241, 0, 241, 242, 0, 242, 243, 0, 243, 244,
0, 244, 245, 0, 245, 246, 0, 246, 247, 0, 247, 248, 0, 248, 249,
0, 249, 250, 0, 250, 251, 0, 251, 252, 0, 252, 253, 0, 253, 254,
0, 254, 255, 0, 255, 256, 0, 256, 257, 0, 257, 258, 0, 258, 259,
0, 259, 260, 0, 260, 261, 0, 261, 262, 0, 262, 263, 0, 263, 264,
0, 264, 265, 0, 265, 266, 0, 266, 267, 0, 267, 268, 0, 268, 269,
0, 269, 270, 0, 270, 271, 0, 271, 272, 0, 272, 273, 0, 273, 274,
0, 274, 275, 0, 275, 276, 0, 276, 277, 0, 277, 278, 0, 278, 279,
0, 279, 280, 0, 280, 281, 0, 281, 282, 0, 282, 283, 0, 283, 284,
0, 284, 285, 0, 285, 286, 0, 286, 287, 0, 287, 288, 0, 288, 289,
0, 289, 290, 0, 290, 291, 0, 291, 292, 0, 292, 293, 0, 293, 294,
0, 294, 295, 0, 295, 296, 0, 296, 297, 0, 297, 298, 0, 298, 299,
0, 299, 300, 0, 300, 301, 0, 301, 302, 0, 302, 303, 0, 303, 304,
0, 304, 305, 0, 305, 306, 0, 306, 307, 0, 307, 308, 0, 308, 309,
0, 309, 310, 0, 310, 311, 0, 311, 312, 0, 312, 313, 0, 313, 314,
0, 314, 315, 0, 315, 316, 0, 316, 317, 0, 317, 318, 0, 318, 319,
0, 319, 320, 0, 320, 321, 0, 321, 322, 0, 322, 323, 0, 323, 324,
0, 324, 325, 0, 325, 326, 0, 326, 327, 0, 327, 328, 0, 328, 329,
0, 329, 330, 0, 330, 331, 0, 331, 332, 0, 332, 333, 0, 333, 334,
0, 334, 335, 0, 335, 336, 0, 336, 337, 0, 337, 338, 0, 338, 339,
0, 339, 340, 0, 340, 341, 0, 341, 342, 0, 342, 343, 0, 343, 344,
0, 344, 345, 0, 345, 346, 0, 346, 347, 0, 347, 348, 0, 348, 349,
0, 349, 350, 0, 350, 351, 0, 351, 352, 0, 352, 353, 0, 353, 354,
0, 354, 355, 0, 355, 356, 0, 356, 357, 0, 357, 358, 0, 358, 359];
|
self.position.1 > height)
|
random_line_split
|
asteroid.rs
|
pub struct Asteroid {
position: (f32, f32),
velocity: (f32, f32),
radius: f32,
}
impl Asteroid {
pub fn update(&mut self) {
self.position.0 += self.velocity.0;
self.position.1 += self.velocity.1;
}
pub fn pos(&self) -> (f32, f32) {
self.position.clone()
}
pub fn radius(&self) -> f32 {
self.radius
}
pub fn still_alive(&self, width: f32, height: f32) -> bool {
!(self.position.0 < -width || self.position.0 > width || self.position.1 < -height ||
self.position.1 > height)
}
pub fn new() -> Asteroid {
Asteroid {
position: (0.0, 0.0),
velocity: (0.0, 0.0),
radius: 10.0,
}
}
pub fn new_with_attr(pos: (f32, f32), vel: (f32, f32), rad: f32) -> Asteroid
|
}
pub const INDICES: [u16; 1074] =
[0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5, 0, 5, 6, 0, 6, 7, 0, 7, 8,
0, 8, 9, 0, 9, 10, 0, 10, 11, 0, 11, 12, 0, 12, 13, 0, 13, 14,
0, 14, 15, 0, 15, 16, 0, 16, 17, 0, 17, 18, 0, 18, 19, 0, 19,
20, 0, 20, 21, 0, 21, 22, 0, 22, 23, 0, 23, 24, 0, 24, 25, 0,
25, 26, 0, 26, 27, 0, 27, 28, 0, 28, 29, 0, 29, 30, 0, 30, 31,
0, 31, 32, 0, 32, 33, 0, 33, 34, 0, 34, 35, 0, 35, 36, 0, 36,
37, 0, 37, 38, 0, 38, 39, 0, 39, 40, 0, 40, 41, 0, 41, 42, 0,
42, 43, 0, 43, 44, 0, 44, 45, 0, 45, 46, 0, 46, 47, 0, 47, 48,
0, 48, 49, 0, 49, 50, 0, 50, 51, 0, 51, 52, 0, 52, 53, 0, 53,
54, 0, 54, 55, 0, 55, 56, 0, 56, 57, 0, 57, 58, 0, 58, 59, 0,
59, 60, 0, 60, 61, 0, 61, 62, 0, 62, 63, 0, 63, 64, 0, 64, 65,
0, 65, 66, 0, 66, 67, 0, 67, 68, 0, 68, 69, 0, 69, 70, 0, 70,
71, 0, 71, 72, 0, 72, 73, 0, 73, 74, 0, 74, 75, 0, 75, 76, 0,
76, 77, 0, 77, 78, 0, 78, 79, 0, 79, 80, 0, 80, 81, 0, 81, 82,
0, 82, 83, 0, 83, 84, 0, 84, 85, 0, 85, 86, 0, 86, 87, 0, 87,
88, 0, 88, 89, 0, 89, 90, 0, 90, 91, 0, 91, 92, 0, 92, 93, 0,
93, 94, 0, 94, 95, 0, 95, 96, 0, 96, 97, 0, 97, 98, 0, 98, 99,
0, 99, 100, 0, 100, 101, 0, 101, 102, 0, 102, 103, 0, 103, 104,
0, 104, 105, 0, 105, 106, 0, 106, 107, 0, 107, 108, 0, 108, 109,
0, 109, 110, 0, 110, 111, 0, 111, 112, 0, 112, 113, 0, 113, 114,
0, 114, 115, 0, 115, 116, 0, 116, 117, 0, 117, 118, 0, 118, 119,
0, 119, 120, 0, 120, 121, 0, 121, 122, 0, 122, 123, 0, 123, 124,
0, 124, 125, 0, 125, 126, 0, 126, 127, 0, 127, 128, 0, 128, 129,
0, 129, 130, 0, 130, 131, 0, 131, 132, 0, 132, 133, 0, 133, 134,
0, 134, 135, 0, 135, 136, 0, 136, 137, 0, 137, 138, 0, 138, 139,
0, 139, 140, 0, 140, 141, 0, 141, 142, 0, 142, 143, 0, 143, 144,
0, 144, 145, 0, 145, 146, 0, 146, 147, 0, 147, 148, 0, 148, 149,
0, 149, 150, 0, 150, 151, 0, 151, 152, 0, 152, 153, 0, 153, 154,
0, 154, 155, 0, 155, 156, 0, 156, 157, 0, 157, 158, 0, 158, 159,
0, 159, 160, 0, 160, 161, 0, 161, 162, 0, 162, 163, 0, 163, 164,
0, 164, 165, 0, 165, 166, 0, 166, 167, 0, 167, 168, 0, 168, 169,
0, 169, 170, 0, 170, 171, 0, 171, 172, 0, 172, 173, 0, 173, 174,
0, 174, 175, 0, 175, 176, 0, 176, 177, 0, 177, 178, 0, 178, 179,
0, 179, 180, 0, 180, 181, 0, 181, 182, 0, 182, 183, 0, 183, 184,
0, 184, 185, 0, 185, 186, 0, 186, 187, 0, 187, 188, 0, 188, 189,
0, 189, 190, 0, 190, 191, 0, 191, 192, 0, 192, 193, 0, 193, 194,
0, 194, 195, 0, 195, 196, 0, 196, 197, 0, 197, 198, 0, 198, 199,
0, 199, 200, 0, 200, 201, 0, 201, 202, 0, 202, 203, 0, 203, 204,
0, 204, 205, 0, 205, 206, 0, 206, 207, 0, 207, 208, 0, 208, 209,
0, 209, 210, 0, 210, 211, 0, 211, 212, 0, 212, 213, 0, 213, 214,
0, 214, 215, 0, 215, 216, 0, 216, 217, 0, 217, 218, 0, 218, 219,
0, 219, 220, 0, 220, 221, 0, 221, 222, 0, 222, 223, 0, 223, 224,
0, 224, 225, 0, 225, 226, 0, 226, 227, 0, 227, 228, 0, 228, 229,
0, 229, 230, 0, 230, 231, 0, 231, 232, 0, 232, 233, 0, 233, 234,
0, 234, 235, 0, 235, 236, 0, 236, 237, 0, 237, 238, 0, 238, 239,
0, 239, 240, 0, 240, 241, 0, 241, 242, 0, 242, 243, 0, 243, 244,
0, 244, 245, 0, 245, 246, 0, 246, 247, 0, 247, 248, 0, 248, 249,
0, 249, 250, 0, 250, 251, 0, 251, 252, 0, 252, 253, 0, 253, 254,
0, 254, 255, 0, 255, 256, 0, 256, 257, 0, 257, 258, 0, 258, 259,
0, 259, 260, 0, 260, 261, 0, 261, 262, 0, 262, 263, 0, 263, 264,
0, 264, 265, 0, 265, 266, 0, 266, 267, 0, 267, 268, 0, 268, 269,
0, 269, 270, 0, 270, 271, 0, 271, 272, 0, 272, 273, 0, 273, 274,
0, 274, 275, 0, 275, 276, 0, 276, 277, 0, 277, 278, 0, 278, 279,
0, 279, 280, 0, 280, 281, 0, 281, 282, 0, 282, 283, 0, 283, 284,
0, 284, 285, 0, 285, 286, 0, 286, 287, 0, 287, 288, 0, 288, 289,
0, 289, 290, 0, 290, 291, 0, 291, 292, 0, 292, 293, 0, 293, 294,
0, 294, 295, 0, 295, 296, 0, 296, 297, 0, 297, 298, 0, 298, 299,
0, 299, 300, 0, 300, 301, 0, 301, 302, 0, 302, 303, 0, 303, 304,
0, 304, 305, 0, 305, 306, 0, 306, 307, 0, 307, 308, 0, 308, 309,
0, 309, 310, 0, 310, 311, 0, 311, 312, 0, 312, 313, 0, 313, 314,
0, 314, 315, 0, 315, 316, 0, 316, 317, 0, 317, 318, 0, 318, 319,
0, 319, 320, 0, 320, 321, 0, 321, 322, 0, 322, 323, 0, 323, 324,
0, 324, 325, 0, 325, 326, 0, 326, 327, 0, 327, 328, 0, 328, 329,
0, 329, 330, 0, 330, 331, 0, 331, 332, 0, 332, 333, 0, 333, 334,
0, 334, 335, 0, 335, 336, 0, 336, 337, 0, 337, 338, 0, 338, 339,
0, 339, 340, 0, 340, 341, 0, 341, 342, 0, 342, 343, 0, 343, 344,
0, 344, 345, 0, 345, 346, 0, 346, 347, 0, 347, 348, 0, 348, 349,
0, 349, 350, 0, 350, 351, 0, 351, 352, 0, 352, 353, 0, 353, 354,
0, 354, 355, 0, 355, 356, 0, 356, 357, 0, 357, 358, 0, 358, 359];
|
{
Asteroid {
position: pos,
velocity: vel,
radius: rad,
}
}
|
identifier_body
|
asteroid.rs
|
pub struct Asteroid {
position: (f32, f32),
velocity: (f32, f32),
radius: f32,
}
impl Asteroid {
pub fn update(&mut self) {
self.position.0 += self.velocity.0;
self.position.1 += self.velocity.1;
}
pub fn pos(&self) -> (f32, f32) {
self.position.clone()
}
pub fn
|
(&self) -> f32 {
self.radius
}
pub fn still_alive(&self, width: f32, height: f32) -> bool {
!(self.position.0 < -width || self.position.0 > width || self.position.1 < -height ||
self.position.1 > height)
}
pub fn new() -> Asteroid {
Asteroid {
position: (0.0, 0.0),
velocity: (0.0, 0.0),
radius: 10.0,
}
}
pub fn new_with_attr(pos: (f32, f32), vel: (f32, f32), rad: f32) -> Asteroid {
Asteroid {
position: pos,
velocity: vel,
radius: rad,
}
}
}
pub const INDICES: [u16; 1074] =
[0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5, 0, 5, 6, 0, 6, 7, 0, 7, 8,
0, 8, 9, 0, 9, 10, 0, 10, 11, 0, 11, 12, 0, 12, 13, 0, 13, 14,
0, 14, 15, 0, 15, 16, 0, 16, 17, 0, 17, 18, 0, 18, 19, 0, 19,
20, 0, 20, 21, 0, 21, 22, 0, 22, 23, 0, 23, 24, 0, 24, 25, 0,
25, 26, 0, 26, 27, 0, 27, 28, 0, 28, 29, 0, 29, 30, 0, 30, 31,
0, 31, 32, 0, 32, 33, 0, 33, 34, 0, 34, 35, 0, 35, 36, 0, 36,
37, 0, 37, 38, 0, 38, 39, 0, 39, 40, 0, 40, 41, 0, 41, 42, 0,
42, 43, 0, 43, 44, 0, 44, 45, 0, 45, 46, 0, 46, 47, 0, 47, 48,
0, 48, 49, 0, 49, 50, 0, 50, 51, 0, 51, 52, 0, 52, 53, 0, 53,
54, 0, 54, 55, 0, 55, 56, 0, 56, 57, 0, 57, 58, 0, 58, 59, 0,
59, 60, 0, 60, 61, 0, 61, 62, 0, 62, 63, 0, 63, 64, 0, 64, 65,
0, 65, 66, 0, 66, 67, 0, 67, 68, 0, 68, 69, 0, 69, 70, 0, 70,
71, 0, 71, 72, 0, 72, 73, 0, 73, 74, 0, 74, 75, 0, 75, 76, 0,
76, 77, 0, 77, 78, 0, 78, 79, 0, 79, 80, 0, 80, 81, 0, 81, 82,
0, 82, 83, 0, 83, 84, 0, 84, 85, 0, 85, 86, 0, 86, 87, 0, 87,
88, 0, 88, 89, 0, 89, 90, 0, 90, 91, 0, 91, 92, 0, 92, 93, 0,
93, 94, 0, 94, 95, 0, 95, 96, 0, 96, 97, 0, 97, 98, 0, 98, 99,
0, 99, 100, 0, 100, 101, 0, 101, 102, 0, 102, 103, 0, 103, 104,
0, 104, 105, 0, 105, 106, 0, 106, 107, 0, 107, 108, 0, 108, 109,
0, 109, 110, 0, 110, 111, 0, 111, 112, 0, 112, 113, 0, 113, 114,
0, 114, 115, 0, 115, 116, 0, 116, 117, 0, 117, 118, 0, 118, 119,
0, 119, 120, 0, 120, 121, 0, 121, 122, 0, 122, 123, 0, 123, 124,
0, 124, 125, 0, 125, 126, 0, 126, 127, 0, 127, 128, 0, 128, 129,
0, 129, 130, 0, 130, 131, 0, 131, 132, 0, 132, 133, 0, 133, 134,
0, 134, 135, 0, 135, 136, 0, 136, 137, 0, 137, 138, 0, 138, 139,
0, 139, 140, 0, 140, 141, 0, 141, 142, 0, 142, 143, 0, 143, 144,
0, 144, 145, 0, 145, 146, 0, 146, 147, 0, 147, 148, 0, 148, 149,
0, 149, 150, 0, 150, 151, 0, 151, 152, 0, 152, 153, 0, 153, 154,
0, 154, 155, 0, 155, 156, 0, 156, 157, 0, 157, 158, 0, 158, 159,
0, 159, 160, 0, 160, 161, 0, 161, 162, 0, 162, 163, 0, 163, 164,
0, 164, 165, 0, 165, 166, 0, 166, 167, 0, 167, 168, 0, 168, 169,
0, 169, 170, 0, 170, 171, 0, 171, 172, 0, 172, 173, 0, 173, 174,
0, 174, 175, 0, 175, 176, 0, 176, 177, 0, 177, 178, 0, 178, 179,
0, 179, 180, 0, 180, 181, 0, 181, 182, 0, 182, 183, 0, 183, 184,
0, 184, 185, 0, 185, 186, 0, 186, 187, 0, 187, 188, 0, 188, 189,
0, 189, 190, 0, 190, 191, 0, 191, 192, 0, 192, 193, 0, 193, 194,
0, 194, 195, 0, 195, 196, 0, 196, 197, 0, 197, 198, 0, 198, 199,
0, 199, 200, 0, 200, 201, 0, 201, 202, 0, 202, 203, 0, 203, 204,
0, 204, 205, 0, 205, 206, 0, 206, 207, 0, 207, 208, 0, 208, 209,
0, 209, 210, 0, 210, 211, 0, 211, 212, 0, 212, 213, 0, 213, 214,
0, 214, 215, 0, 215, 216, 0, 216, 217, 0, 217, 218, 0, 218, 219,
0, 219, 220, 0, 220, 221, 0, 221, 222, 0, 222, 223, 0, 223, 224,
0, 224, 225, 0, 225, 226, 0, 226, 227, 0, 227, 228, 0, 228, 229,
0, 229, 230, 0, 230, 231, 0, 231, 232, 0, 232, 233, 0, 233, 234,
0, 234, 235, 0, 235, 236, 0, 236, 237, 0, 237, 238, 0, 238, 239,
0, 239, 240, 0, 240, 241, 0, 241, 242, 0, 242, 243, 0, 243, 244,
0, 244, 245, 0, 245, 246, 0, 246, 247, 0, 247, 248, 0, 248, 249,
0, 249, 250, 0, 250, 251, 0, 251, 252, 0, 252, 253, 0, 253, 254,
0, 254, 255, 0, 255, 256, 0, 256, 257, 0, 257, 258, 0, 258, 259,
0, 259, 260, 0, 260, 261, 0, 261, 262, 0, 262, 263, 0, 263, 264,
0, 264, 265, 0, 265, 266, 0, 266, 267, 0, 267, 268, 0, 268, 269,
0, 269, 270, 0, 270, 271, 0, 271, 272, 0, 272, 273, 0, 273, 274,
0, 274, 275, 0, 275, 276, 0, 276, 277, 0, 277, 278, 0, 278, 279,
0, 279, 280, 0, 280, 281, 0, 281, 282, 0, 282, 283, 0, 283, 284,
0, 284, 285, 0, 285, 286, 0, 286, 287, 0, 287, 288, 0, 288, 289,
0, 289, 290, 0, 290, 291, 0, 291, 292, 0, 292, 293, 0, 293, 294,
0, 294, 295, 0, 295, 296, 0, 296, 297, 0, 297, 298, 0, 298, 299,
0, 299, 300, 0, 300, 301, 0, 301, 302, 0, 302, 303, 0, 303, 304,
0, 304, 305, 0, 305, 306, 0, 306, 307, 0, 307, 308, 0, 308, 309,
0, 309, 310, 0, 310, 311, 0, 311, 312, 0, 312, 313, 0, 313, 314,
0, 314, 315, 0, 315, 316, 0, 316, 317, 0, 317, 318, 0, 318, 319,
0, 319, 320, 0, 320, 321, 0, 321, 322, 0, 322, 323, 0, 323, 324,
0, 324, 325, 0, 325, 326, 0, 326, 327, 0, 327, 328, 0, 328, 329,
0, 329, 330, 0, 330, 331, 0, 331, 332, 0, 332, 333, 0, 333, 334,
0, 334, 335, 0, 335, 336, 0, 336, 337, 0, 337, 338, 0, 338, 339,
0, 339, 340, 0, 340, 341, 0, 341, 342, 0, 342, 343, 0, 343, 344,
0, 344, 345, 0, 345, 346, 0, 346, 347, 0, 347, 348, 0, 348, 349,
0, 349, 350, 0, 350, 351, 0, 351, 352, 0, 352, 353, 0, 353, 354,
0, 354, 355, 0, 355, 356, 0, 356, 357, 0, 357, 358, 0, 358, 359];
|
radius
|
identifier_name
|
basic-types-globals.rs
|
// Caveats - gdb prints any 8-bit value (meaning rust I8 and u8 values)
// as its numerical value along with its associated ASCII char, there
// doesn't seem to be any way around this. Also, gdb doesn't know
// about UTF-32 character encoding and will print a rust char as only
// its numerical value.
// min-lldb-version: 310
// ignore-gdb // Test temporarily ignored due to debuginfo tests being disabled, see PR 47155
// compile-flags:-g
// gdb-command:run
// gdbg-command:print 'basic_types_globals::B'
// gdbr-command:print B
// gdb-check:$1 = false
// gdbg-command:print 'basic_types_globals::I'
// gdbr-command:print I
// gdb-check:$2 = -1
// gdbg-command:print 'basic_types_globals::C'
// gdbr-command:print C
// gdbg-check:$3 = 97
// gdbr-check:$3 = 97 'a'
// gdbg-command:print/d 'basic_types_globals::I8'
// gdbr-command:print I8
// gdb-check:$4 = 68
// gdbg-command:print 'basic_types_globals::I16'
// gdbr-command:print I16
// gdb-check:$5 = -16
// gdbg-command:print 'basic_types_globals::I32'
// gdbr-command:print I32
// gdb-check:$6 = -32
// gdbg-command:print 'basic_types_globals::I64'
// gdbr-command:print I64
// gdb-check:$7 = -64
// gdbg-command:print 'basic_types_globals::U'
// gdbr-command:print U
// gdb-check:$8 = 1
// gdbg-command:print/d 'basic_types_globals::U8'
// gdbr-command:print U8
// gdb-check:$9 = 100
// gdbg-command:print 'basic_types_globals::U16'
// gdbr-command:print U16
// gdb-check:$10 = 16
// gdbg-command:print 'basic_types_globals::U32'
// gdbr-command:print U32
// gdb-check:$11 = 32
// gdbg-command:print 'basic_types_globals::U64'
// gdbr-command:print U64
// gdb-check:$12 = 64
// gdbg-command:print 'basic_types_globals::F32'
// gdbr-command:print F32
// gdb-check:$13 = 2.5
// gdbg-command:print 'basic_types_globals::F64'
// gdbr-command:print F64
// gdb-check:$14 = 3.5
// gdb-command:continue
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
// N.B. These are `mut` only so they don't constant fold away.
static mut B: bool = false;
static mut I: isize = -1;
static mut C: char = 'a';
static mut I8: i8 = 68;
static mut I16: i16 = -16;
static mut I32: i32 = -32;
static mut I64: i64 = -64;
static mut U: usize = 1;
static mut U8: u8 = 100;
static mut U16: u16 = 16;
static mut U32: u32 = 32;
static mut U64: u64 = 64;
static mut F32: f32 = 2.5;
static mut F64: f64 = 3.5;
fn main() {
_zzz(); // #break
let a = unsafe { (B, I, C, I8, I16, I32, I64, U, U8, U16, U32, U64, F32, F64) };
}
fn _zzz()
|
{()}
|
identifier_body
|
|
basic-types-globals.rs
|
// Caveats - gdb prints any 8-bit value (meaning rust I8 and u8 values)
// as its numerical value along with its associated ASCII char, there
// doesn't seem to be any way around this. Also, gdb doesn't know
// about UTF-32 character encoding and will print a rust char as only
// its numerical value.
// min-lldb-version: 310
// ignore-gdb // Test temporarily ignored due to debuginfo tests being disabled, see PR 47155
// compile-flags:-g
// gdb-command:run
// gdbg-command:print 'basic_types_globals::B'
|
// gdb-check:$1 = false
// gdbg-command:print 'basic_types_globals::I'
// gdbr-command:print I
// gdb-check:$2 = -1
// gdbg-command:print 'basic_types_globals::C'
// gdbr-command:print C
// gdbg-check:$3 = 97
// gdbr-check:$3 = 97 'a'
// gdbg-command:print/d 'basic_types_globals::I8'
// gdbr-command:print I8
// gdb-check:$4 = 68
// gdbg-command:print 'basic_types_globals::I16'
// gdbr-command:print I16
// gdb-check:$5 = -16
// gdbg-command:print 'basic_types_globals::I32'
// gdbr-command:print I32
// gdb-check:$6 = -32
// gdbg-command:print 'basic_types_globals::I64'
// gdbr-command:print I64
// gdb-check:$7 = -64
// gdbg-command:print 'basic_types_globals::U'
// gdbr-command:print U
// gdb-check:$8 = 1
// gdbg-command:print/d 'basic_types_globals::U8'
// gdbr-command:print U8
// gdb-check:$9 = 100
// gdbg-command:print 'basic_types_globals::U16'
// gdbr-command:print U16
// gdb-check:$10 = 16
// gdbg-command:print 'basic_types_globals::U32'
// gdbr-command:print U32
// gdb-check:$11 = 32
// gdbg-command:print 'basic_types_globals::U64'
// gdbr-command:print U64
// gdb-check:$12 = 64
// gdbg-command:print 'basic_types_globals::F32'
// gdbr-command:print F32
// gdb-check:$13 = 2.5
// gdbg-command:print 'basic_types_globals::F64'
// gdbr-command:print F64
// gdb-check:$14 = 3.5
// gdb-command:continue
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
// N.B. These are `mut` only so they don't constant fold away.
static mut B: bool = false;
static mut I: isize = -1;
static mut C: char = 'a';
static mut I8: i8 = 68;
static mut I16: i16 = -16;
static mut I32: i32 = -32;
static mut I64: i64 = -64;
static mut U: usize = 1;
static mut U8: u8 = 100;
static mut U16: u16 = 16;
static mut U32: u32 = 32;
static mut U64: u64 = 64;
static mut F32: f32 = 2.5;
static mut F64: f64 = 3.5;
fn main() {
_zzz(); // #break
let a = unsafe { (B, I, C, I8, I16, I32, I64, U, U8, U16, U32, U64, F32, F64) };
}
fn _zzz() {()}
|
// gdbr-command:print B
|
random_line_split
|
basic-types-globals.rs
|
// Caveats - gdb prints any 8-bit value (meaning rust I8 and u8 values)
// as its numerical value along with its associated ASCII char, there
// doesn't seem to be any way around this. Also, gdb doesn't know
// about UTF-32 character encoding and will print a rust char as only
// its numerical value.
// min-lldb-version: 310
// ignore-gdb // Test temporarily ignored due to debuginfo tests being disabled, see PR 47155
// compile-flags:-g
// gdb-command:run
// gdbg-command:print 'basic_types_globals::B'
// gdbr-command:print B
// gdb-check:$1 = false
// gdbg-command:print 'basic_types_globals::I'
// gdbr-command:print I
// gdb-check:$2 = -1
// gdbg-command:print 'basic_types_globals::C'
// gdbr-command:print C
// gdbg-check:$3 = 97
// gdbr-check:$3 = 97 'a'
// gdbg-command:print/d 'basic_types_globals::I8'
// gdbr-command:print I8
// gdb-check:$4 = 68
// gdbg-command:print 'basic_types_globals::I16'
// gdbr-command:print I16
// gdb-check:$5 = -16
// gdbg-command:print 'basic_types_globals::I32'
// gdbr-command:print I32
// gdb-check:$6 = -32
// gdbg-command:print 'basic_types_globals::I64'
// gdbr-command:print I64
// gdb-check:$7 = -64
// gdbg-command:print 'basic_types_globals::U'
// gdbr-command:print U
// gdb-check:$8 = 1
// gdbg-command:print/d 'basic_types_globals::U8'
// gdbr-command:print U8
// gdb-check:$9 = 100
// gdbg-command:print 'basic_types_globals::U16'
// gdbr-command:print U16
// gdb-check:$10 = 16
// gdbg-command:print 'basic_types_globals::U32'
// gdbr-command:print U32
// gdb-check:$11 = 32
// gdbg-command:print 'basic_types_globals::U64'
// gdbr-command:print U64
// gdb-check:$12 = 64
// gdbg-command:print 'basic_types_globals::F32'
// gdbr-command:print F32
// gdb-check:$13 = 2.5
// gdbg-command:print 'basic_types_globals::F64'
// gdbr-command:print F64
// gdb-check:$14 = 3.5
// gdb-command:continue
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
// N.B. These are `mut` only so they don't constant fold away.
static mut B: bool = false;
static mut I: isize = -1;
static mut C: char = 'a';
static mut I8: i8 = 68;
static mut I16: i16 = -16;
static mut I32: i32 = -32;
static mut I64: i64 = -64;
static mut U: usize = 1;
static mut U8: u8 = 100;
static mut U16: u16 = 16;
static mut U32: u32 = 32;
static mut U64: u64 = 64;
static mut F32: f32 = 2.5;
static mut F64: f64 = 3.5;
fn main() {
_zzz(); // #break
let a = unsafe { (B, I, C, I8, I16, I32, I64, U, U8, U16, U32, U64, F32, F64) };
}
fn
|
() {()}
|
_zzz
|
identifier_name
|
process-detach.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast
// ignore-win32
// ignore-android
// This test ensures that the 'detach' field on processes does the right thing.
// By detaching the child process, they should be put into a separate process
// group. We test this by spawning a detached process, then killing our own
// group with a signal.
//
// Note that the first thing we do is put ourselves in our own process group so
// we don't interfere with other running tests.
extern crate green;
extern crate rustuv;
use std::libc;
use std::io::process;
use std::io::signal::{Listener, Interrupt};
#[start]
fn start(argc: int, argv: **u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn
|
() {
unsafe { libc::setsid(); }
let config = process::ProcessConfig {
program : "/bin/sh",
args: &[~"-c", ~"read a"],
detach: true,
.. process::ProcessConfig::new()
};
// we shouldn't die because of an interrupt
let mut l = Listener::new();
l.register(Interrupt).unwrap();
// spawn the child
let mut p = process::Process::configure(config).unwrap();
// send an interrupt to everyone in our process group
unsafe { libc::funcs::posix88::signal::kill(0, libc::SIGINT); }
// Wait for the child process to die (terminate it's stdin and the read
// should fail).
drop(p.stdin.take());
match p.wait() {
process::ExitStatus(..) => {}
process::ExitSignal(..) => fail!()
}
}
|
main
|
identifier_name
|
process-detach.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast
// ignore-win32
// ignore-android
// This test ensures that the 'detach' field on processes does the right thing.
// By detaching the child process, they should be put into a separate process
// group. We test this by spawning a detached process, then killing our own
// group with a signal.
//
// Note that the first thing we do is put ourselves in our own process group so
// we don't interfere with other running tests.
extern crate green;
extern crate rustuv;
use std::libc;
use std::io::process;
use std::io::signal::{Listener, Interrupt};
#[start]
fn start(argc: int, argv: **u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
unsafe { libc::setsid(); }
let config = process::ProcessConfig {
program : "/bin/sh",
|
// we shouldn't die because of an interrupt
let mut l = Listener::new();
l.register(Interrupt).unwrap();
// spawn the child
let mut p = process::Process::configure(config).unwrap();
// send an interrupt to everyone in our process group
unsafe { libc::funcs::posix88::signal::kill(0, libc::SIGINT); }
// Wait for the child process to die (terminate it's stdin and the read
// should fail).
drop(p.stdin.take());
match p.wait() {
process::ExitStatus(..) => {}
process::ExitSignal(..) => fail!()
}
}
|
args: &[~"-c", ~"read a"],
detach: true,
.. process::ProcessConfig::new()
};
|
random_line_split
|
process-detach.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast
// ignore-win32
// ignore-android
// This test ensures that the 'detach' field on processes does the right thing.
// By detaching the child process, they should be put into a separate process
// group. We test this by spawning a detached process, then killing our own
// group with a signal.
//
// Note that the first thing we do is put ourselves in our own process group so
// we don't interfere with other running tests.
extern crate green;
extern crate rustuv;
use std::libc;
use std::io::process;
use std::io::signal::{Listener, Interrupt};
#[start]
fn start(argc: int, argv: **u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main()
|
// Wait for the child process to die (terminate it's stdin and the read
// should fail).
drop(p.stdin.take());
match p.wait() {
process::ExitStatus(..) => {}
process::ExitSignal(..) => fail!()
}
}
|
{
unsafe { libc::setsid(); }
let config = process::ProcessConfig {
program : "/bin/sh",
args: &[~"-c", ~"read a"],
detach: true,
.. process::ProcessConfig::new()
};
// we shouldn't die because of an interrupt
let mut l = Listener::new();
l.register(Interrupt).unwrap();
// spawn the child
let mut p = process::Process::configure(config).unwrap();
// send an interrupt to everyone in our process group
unsafe { libc::funcs::posix88::signal::kill(0, libc::SIGINT); }
|
identifier_body
|
shebang.rs
|
use crate::errors::*;
use crate::errors::{Error, Result};
use crate::filesystem::FileSystem;
use crate::kernel::execve::params::{Arg, ExecveParameters};
use std::ffi::CString;
use std::io::ErrorKind;
use std::os::unix::prelude::OsStrExt;
use std::path::{Path, PathBuf};
use std::{fs::File, io::Read};
use super::LoadResult;
#[derive(Debug, PartialEq, Eq)]
pub struct ExtractResult {
pub interpreter: PathBuf,
pub optional_arg: Option<CString>,
}
/// Length of the initial part of the executable to be read.
/// See https://elixir.bootlin.com/linux/v5.14-rc3/source/include/uapi/linux/binfmts.h#L19
const BINPRM_BUF_SIZE: usize = 256;
/// The loader function for script file which contains a shebang.
///
/// The definition of a script file can be found in the document of
/// [`extract()`] function.
///
/// According to man page execve(2). if the original command line to be executed
/// is `filename arg...`, a successful load will result in the command line
/// being replaced with `interpreter [optional-arg] filename arg...`. And the
/// return value of the function will be set to
/// `Ok(LoadResult::RestartWithNewParameters)`.
///
/// Note that this function will modify the value of `parameters`:
/// - Replace argv[0] with the raw guest side path
/// (`parameters.raw_guest_path`).
/// - Append `interpreter` and `optional-arg`(if exists) to the front of argv.
/// - Replace the path of the executable(`parameters.raw_guest_path`) with the
/// path of the `interpreter`.
pub(super) fn load_script(
_fs: &FileSystem,
parameters: &mut ExecveParameters,
) -> Result<LoadResult> {
// Extract shebang from script file
let extract_result = extract(¶meters.host_path)?;
// Modify execve parameters
// First, remove the old of argv[0].
parameters.argv.remove(0);
// Insert the path of this script into the front of argv.
parameters.argv.insert(
0,
Arg::CStringInSelf(unsafe {
CString::from_vec_unchecked(parameters.raw_guest_path.as_os_str().as_bytes().into())
}),
);
// If optional argument exist, also push it to the front.
if let Some(arg) = extract_result.optional_arg {
parameters.argv.insert(0, Arg::CStringInSelf(arg));
}
// Insert Path of the interpreter into the the front.
parameters.argv.insert(
0,
Arg::CStringInSelf(unsafe {
CString::from_vec_unchecked(extract_result.interpreter.as_os_str().as_bytes().into())
}),
);
// reset raw_guest_path to the new interpreter.
parameters.raw_guest_path = extract_result.interpreter;
return Ok(LoadResult::RestartWithNewParameters);
}
/// This function takes a host-side file path, checks for the presence of '#!'
/// and tries to parse out the interpreter and optional-arg.
///
/// As is defined by man execve(2), a script file staring with a line of the
/// form:
///
/// #!interpreter [optional-arg]
///
/// Where the path of `interpreter` comes after the `#!` (spaces are
/// allowed), and the remainder of the content immediately following is the
/// `optional-arg`. Note that optional-arg is treated as one argument and not as
/// multiple arguments.
fn
|
(host_path: &Path) -> Result<ExtractResult> {
let mut file = File::open(host_path)?;
let mut buffer = [0u8; BINPRM_BUF_SIZE];
// Read bytes from the beginning of the file, allowing the file length to be
// smaller than the buffer length.
if let Err(error) = file.read_exact(&mut buffer) {
if error.kind()!= ErrorKind::UnexpectedEof {
Err(error)?
}
}
// refuse to execute this if not start with #!
match (buffer[0], buffer[1]) {
(b'#', b'!') => {}
_ => {
return Err(Error::errno_with_msg(
ENOEXEC,
"file does not start with a shebang",
))
}
}
// First, calculate the position of the end of the first line.
let line_end = buffer
.iter()
.position(|&c| c == b'\n')
.unwrap_or(buffer.len());
// In the first line, search for the start position of interpreter.
let interpreter_start = buffer[2..line_end]
.iter()
.position(|&c| c!= b''&& c!= b'\t')
.map(|p| 2 + p)
.ok_or_else(|| Error::errno_with_msg(ENOEXEC, "no interpreter found"))?;
// search for the end position of interpreter.
let interpreter_end = buffer[interpreter_start..line_end]
.iter()
.position(|&c| c == b''|| c == b'\t' || c == b'\0')
.map(|p| interpreter_start + p)
.unwrap_or(line_end);
// If the end position of interpreter is the same as the length of buffer, we
// must assume the interpreter path is truncated, which is not allowed.
if interpreter_end == buffer.len() {
return Err(Error::errno_with_msg(
ENOEXEC,
"the interpreter path is truncated",
));
}
// Read interpreter
if interpreter_start == interpreter_end {
return Err(Error::errno_with_msg(
EACCES,
"path of interpreter is empty",
));
}
let interpreter = PathBuf::from(
std::str::from_utf8(&buffer[interpreter_start..interpreter_end])
.errno(ENOENT)
.context("path of interpreter is not valid")?,
);
// On Linux, the entire string following the interpreter name is passed as a
// single argument to the interpreter, and this string can include white space.
// In this case, optional argument is everything from ope_arg_start to the end
// of line, stripping external white space.
// Now check if there is an optional argument.
let opt_arg_start = buffer[interpreter_end..line_end]
.iter()
.position(|&c| c!= b''&& c!= b'\t')
.map(|p| interpreter_end + p)
.unwrap_or(line_end);
let opt_arg = if opt_arg_start == line_end {
// There is no optional argument
None
} else {
// Strip white space at the end.
let line_end = buffer[opt_arg_start..line_end]
.iter()
.rposition(|&c| c!= b''&& c!= b'\t')
.map(|p| opt_arg_start + p + 1)
.unwrap_or(opt_arg_start);
// Search for the position of next b'\0', as the range of optional argument.
let opt_arg_end = buffer[opt_arg_start..line_end]
.iter()
.position(|&c| c == b'\0')
.map(|p| opt_arg_start + p)
.unwrap_or(line_end);
if opt_arg_end - opt_arg_start > 0 {
Some(
CString::new(&buffer[opt_arg_start..opt_arg_end])
.errno(ENOEXEC)
.context("optional argument is not valid")?,
)
} else {
None
}
};
Ok(ExtractResult {
interpreter: interpreter,
optional_arg: opt_arg,
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::tests::get_test_rootfs_path;
#[test]
fn test_extract_shebang_not_script() {
let rootfs_path = get_test_rootfs_path();
// it should detect that `/bin/sleep` is not a script
assert_eq!(
Err(Error::errno(ENOEXEC)),
extract(&rootfs_path.join("bin/sleep"))
);
}
}
|
extract
|
identifier_name
|
shebang.rs
|
use crate::errors::*;
use crate::errors::{Error, Result};
use crate::filesystem::FileSystem;
use crate::kernel::execve::params::{Arg, ExecveParameters};
use std::ffi::CString;
use std::io::ErrorKind;
use std::os::unix::prelude::OsStrExt;
use std::path::{Path, PathBuf};
use std::{fs::File, io::Read};
use super::LoadResult;
#[derive(Debug, PartialEq, Eq)]
pub struct ExtractResult {
pub interpreter: PathBuf,
pub optional_arg: Option<CString>,
}
/// Length of the initial part of the executable to be read.
/// See https://elixir.bootlin.com/linux/v5.14-rc3/source/include/uapi/linux/binfmts.h#L19
const BINPRM_BUF_SIZE: usize = 256;
/// The loader function for script file which contains a shebang.
///
/// The definition of a script file can be found in the document of
/// [`extract()`] function.
///
/// According to man page execve(2). if the original command line to be executed
/// is `filename arg...`, a successful load will result in the command line
/// being replaced with `interpreter [optional-arg] filename arg...`. And the
/// return value of the function will be set to
/// `Ok(LoadResult::RestartWithNewParameters)`.
///
/// Note that this function will modify the value of `parameters`:
/// - Replace argv[0] with the raw guest side path
/// (`parameters.raw_guest_path`).
/// - Append `interpreter` and `optional-arg`(if exists) to the front of argv.
/// - Replace the path of the executable(`parameters.raw_guest_path`) with the
/// path of the `interpreter`.
pub(super) fn load_script(
_fs: &FileSystem,
parameters: &mut ExecveParameters,
) -> Result<LoadResult> {
// Extract shebang from script file
let extract_result = extract(¶meters.host_path)?;
// Modify execve parameters
// First, remove the old of argv[0].
parameters.argv.remove(0);
// Insert the path of this script into the front of argv.
parameters.argv.insert(
0,
Arg::CStringInSelf(unsafe {
CString::from_vec_unchecked(parameters.raw_guest_path.as_os_str().as_bytes().into())
}),
);
// If optional argument exist, also push it to the front.
if let Some(arg) = extract_result.optional_arg {
parameters.argv.insert(0, Arg::CStringInSelf(arg));
}
// Insert Path of the interpreter into the the front.
parameters.argv.insert(
0,
Arg::CStringInSelf(unsafe {
CString::from_vec_unchecked(extract_result.interpreter.as_os_str().as_bytes().into())
}),
);
// reset raw_guest_path to the new interpreter.
parameters.raw_guest_path = extract_result.interpreter;
return Ok(LoadResult::RestartWithNewParameters);
}
/// This function takes a host-side file path, checks for the presence of '#!'
/// and tries to parse out the interpreter and optional-arg.
///
/// As is defined by man execve(2), a script file staring with a line of the
/// form:
///
/// #!interpreter [optional-arg]
///
/// Where the path of `interpreter` comes after the `#!` (spaces are
/// allowed), and the remainder of the content immediately following is the
/// `optional-arg`. Note that optional-arg is treated as one argument and not as
/// multiple arguments.
fn extract(host_path: &Path) -> Result<ExtractResult> {
let mut file = File::open(host_path)?;
let mut buffer = [0u8; BINPRM_BUF_SIZE];
// Read bytes from the beginning of the file, allowing the file length to be
// smaller than the buffer length.
if let Err(error) = file.read_exact(&mut buffer)
|
// refuse to execute this if not start with #!
match (buffer[0], buffer[1]) {
(b'#', b'!') => {}
_ => {
return Err(Error::errno_with_msg(
ENOEXEC,
"file does not start with a shebang",
))
}
}
// First, calculate the position of the end of the first line.
let line_end = buffer
.iter()
.position(|&c| c == b'\n')
.unwrap_or(buffer.len());
// In the first line, search for the start position of interpreter.
let interpreter_start = buffer[2..line_end]
.iter()
.position(|&c| c!= b''&& c!= b'\t')
.map(|p| 2 + p)
.ok_or_else(|| Error::errno_with_msg(ENOEXEC, "no interpreter found"))?;
// search for the end position of interpreter.
let interpreter_end = buffer[interpreter_start..line_end]
.iter()
.position(|&c| c == b''|| c == b'\t' || c == b'\0')
.map(|p| interpreter_start + p)
.unwrap_or(line_end);
// If the end position of interpreter is the same as the length of buffer, we
// must assume the interpreter path is truncated, which is not allowed.
if interpreter_end == buffer.len() {
return Err(Error::errno_with_msg(
ENOEXEC,
"the interpreter path is truncated",
));
}
// Read interpreter
if interpreter_start == interpreter_end {
return Err(Error::errno_with_msg(
EACCES,
"path of interpreter is empty",
));
}
let interpreter = PathBuf::from(
std::str::from_utf8(&buffer[interpreter_start..interpreter_end])
.errno(ENOENT)
.context("path of interpreter is not valid")?,
);
// On Linux, the entire string following the interpreter name is passed as a
// single argument to the interpreter, and this string can include white space.
// In this case, optional argument is everything from ope_arg_start to the end
// of line, stripping external white space.
// Now check if there is an optional argument.
let opt_arg_start = buffer[interpreter_end..line_end]
.iter()
.position(|&c| c!= b''&& c!= b'\t')
.map(|p| interpreter_end + p)
.unwrap_or(line_end);
let opt_arg = if opt_arg_start == line_end {
// There is no optional argument
None
} else {
// Strip white space at the end.
let line_end = buffer[opt_arg_start..line_end]
.iter()
.rposition(|&c| c!= b''&& c!= b'\t')
.map(|p| opt_arg_start + p + 1)
.unwrap_or(opt_arg_start);
// Search for the position of next b'\0', as the range of optional argument.
let opt_arg_end = buffer[opt_arg_start..line_end]
.iter()
.position(|&c| c == b'\0')
.map(|p| opt_arg_start + p)
.unwrap_or(line_end);
if opt_arg_end - opt_arg_start > 0 {
Some(
CString::new(&buffer[opt_arg_start..opt_arg_end])
.errno(ENOEXEC)
.context("optional argument is not valid")?,
)
} else {
None
}
};
Ok(ExtractResult {
interpreter: interpreter,
optional_arg: opt_arg,
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::tests::get_test_rootfs_path;
#[test]
fn test_extract_shebang_not_script() {
let rootfs_path = get_test_rootfs_path();
// it should detect that `/bin/sleep` is not a script
assert_eq!(
Err(Error::errno(ENOEXEC)),
extract(&rootfs_path.join("bin/sleep"))
);
}
}
|
{
if error.kind() != ErrorKind::UnexpectedEof {
Err(error)?
}
}
|
conditional_block
|
shebang.rs
|
use crate::errors::*;
use crate::errors::{Error, Result};
use crate::filesystem::FileSystem;
use crate::kernel::execve::params::{Arg, ExecveParameters};
use std::ffi::CString;
use std::io::ErrorKind;
use std::os::unix::prelude::OsStrExt;
use std::path::{Path, PathBuf};
use std::{fs::File, io::Read};
use super::LoadResult;
#[derive(Debug, PartialEq, Eq)]
pub struct ExtractResult {
pub interpreter: PathBuf,
pub optional_arg: Option<CString>,
}
/// Length of the initial part of the executable to be read.
/// See https://elixir.bootlin.com/linux/v5.14-rc3/source/include/uapi/linux/binfmts.h#L19
const BINPRM_BUF_SIZE: usize = 256;
/// The loader function for script file which contains a shebang.
///
/// The definition of a script file can be found in the document of
/// [`extract()`] function.
///
/// According to man page execve(2). if the original command line to be executed
/// is `filename arg...`, a successful load will result in the command line
/// being replaced with `interpreter [optional-arg] filename arg...`. And the
/// return value of the function will be set to
/// `Ok(LoadResult::RestartWithNewParameters)`.
///
/// Note that this function will modify the value of `parameters`:
/// - Replace argv[0] with the raw guest side path
/// (`parameters.raw_guest_path`).
/// - Append `interpreter` and `optional-arg`(if exists) to the front of argv.
/// - Replace the path of the executable(`parameters.raw_guest_path`) with the
/// path of the `interpreter`.
pub(super) fn load_script(
_fs: &FileSystem,
parameters: &mut ExecveParameters,
) -> Result<LoadResult> {
// Extract shebang from script file
let extract_result = extract(¶meters.host_path)?;
// Modify execve parameters
// First, remove the old of argv[0].
parameters.argv.remove(0);
// Insert the path of this script into the front of argv.
parameters.argv.insert(
0,
Arg::CStringInSelf(unsafe {
CString::from_vec_unchecked(parameters.raw_guest_path.as_os_str().as_bytes().into())
}),
);
// If optional argument exist, also push it to the front.
if let Some(arg) = extract_result.optional_arg {
parameters.argv.insert(0, Arg::CStringInSelf(arg));
}
// Insert Path of the interpreter into the the front.
parameters.argv.insert(
0,
Arg::CStringInSelf(unsafe {
CString::from_vec_unchecked(extract_result.interpreter.as_os_str().as_bytes().into())
}),
);
// reset raw_guest_path to the new interpreter.
parameters.raw_guest_path = extract_result.interpreter;
return Ok(LoadResult::RestartWithNewParameters);
}
/// This function takes a host-side file path, checks for the presence of '#!'
/// and tries to parse out the interpreter and optional-arg.
///
/// As is defined by man execve(2), a script file staring with a line of the
/// form:
///
/// #!interpreter [optional-arg]
///
/// Where the path of `interpreter` comes after the `#!` (spaces are
/// allowed), and the remainder of the content immediately following is the
/// `optional-arg`. Note that optional-arg is treated as one argument and not as
/// multiple arguments.
fn extract(host_path: &Path) -> Result<ExtractResult> {
let mut file = File::open(host_path)?;
let mut buffer = [0u8; BINPRM_BUF_SIZE];
// Read bytes from the beginning of the file, allowing the file length to be
// smaller than the buffer length.
if let Err(error) = file.read_exact(&mut buffer) {
if error.kind()!= ErrorKind::UnexpectedEof {
Err(error)?
}
}
// refuse to execute this if not start with #!
match (buffer[0], buffer[1]) {
(b'#', b'!') => {}
_ => {
return Err(Error::errno_with_msg(
ENOEXEC,
"file does not start with a shebang",
))
}
}
// First, calculate the position of the end of the first line.
let line_end = buffer
.iter()
.position(|&c| c == b'\n')
.unwrap_or(buffer.len());
// In the first line, search for the start position of interpreter.
let interpreter_start = buffer[2..line_end]
.iter()
.position(|&c| c!= b''&& c!= b'\t')
.map(|p| 2 + p)
.ok_or_else(|| Error::errno_with_msg(ENOEXEC, "no interpreter found"))?;
// search for the end position of interpreter.
let interpreter_end = buffer[interpreter_start..line_end]
.iter()
.position(|&c| c == b''|| c == b'\t' || c == b'\0')
.map(|p| interpreter_start + p)
.unwrap_or(line_end);
// If the end position of interpreter is the same as the length of buffer, we
// must assume the interpreter path is truncated, which is not allowed.
if interpreter_end == buffer.len() {
return Err(Error::errno_with_msg(
ENOEXEC,
"the interpreter path is truncated",
));
}
|
return Err(Error::errno_with_msg(
EACCES,
"path of interpreter is empty",
));
}
let interpreter = PathBuf::from(
std::str::from_utf8(&buffer[interpreter_start..interpreter_end])
.errno(ENOENT)
.context("path of interpreter is not valid")?,
);
// On Linux, the entire string following the interpreter name is passed as a
// single argument to the interpreter, and this string can include white space.
// In this case, optional argument is everything from ope_arg_start to the end
// of line, stripping external white space.
// Now check if there is an optional argument.
let opt_arg_start = buffer[interpreter_end..line_end]
.iter()
.position(|&c| c!= b''&& c!= b'\t')
.map(|p| interpreter_end + p)
.unwrap_or(line_end);
let opt_arg = if opt_arg_start == line_end {
// There is no optional argument
None
} else {
// Strip white space at the end.
let line_end = buffer[opt_arg_start..line_end]
.iter()
.rposition(|&c| c!= b''&& c!= b'\t')
.map(|p| opt_arg_start + p + 1)
.unwrap_or(opt_arg_start);
// Search for the position of next b'\0', as the range of optional argument.
let opt_arg_end = buffer[opt_arg_start..line_end]
.iter()
.position(|&c| c == b'\0')
.map(|p| opt_arg_start + p)
.unwrap_or(line_end);
if opt_arg_end - opt_arg_start > 0 {
Some(
CString::new(&buffer[opt_arg_start..opt_arg_end])
.errno(ENOEXEC)
.context("optional argument is not valid")?,
)
} else {
None
}
};
Ok(ExtractResult {
interpreter: interpreter,
optional_arg: opt_arg,
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::tests::get_test_rootfs_path;
#[test]
fn test_extract_shebang_not_script() {
let rootfs_path = get_test_rootfs_path();
// it should detect that `/bin/sleep` is not a script
assert_eq!(
Err(Error::errno(ENOEXEC)),
extract(&rootfs_path.join("bin/sleep"))
);
}
}
|
// Read interpreter
if interpreter_start == interpreter_end {
|
random_line_split
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
pub fn setup() {
const WORKSPACE: &str = "INSTA_WORKSPACE_ROOT";
const UPDATE: &str = "INSTA_UPDATE";
if std::env::var(WORKSPACE).is_err() {
let mut root = std::path::PathBuf::from(file!());
root.pop();
root.pop();
std::env::set_var(WORKSPACE, root);
}
if std::env::var(UPDATE).is_err() {
std::env::set_var(UPDATE, "no");
}
}
pub fn run(
test_name: &str,
is_cargo: bool,
snapshot: &str,
file: &str,
module: &str,
line: u32,
expr: &str,
) {
let command = if is_cargo {
"INSTA_UPDATE=1 cargo test..."
} else
|
;
println!(
"{:=^80}\n",
format!(" Run `{}` to update snapshots ", command)
);
let file_name = std::path::Path::new(file)
.file_name()
.and_then(|p| p.to_str())
.unwrap();
insta::_macro_support::assert_snapshot(
test_name.into(),
snapshot,
"unused",
// buck builds have a _unittest module suffix which cargo doesn't
// this makes the snapshot location consistent on both
&module.replacen("_unittest", "", 1),
file_name,
line,
expr,
)
.unwrap();
}
/// Assert that the serde json representation of given expression matches the snapshot
/// stored on disk.
#[macro_export]
macro_rules! assert_json {
($value: expr, $test_name: ident) => {{
$crate::setup();
$crate::run(
stringify!($test_name),
option_env!("CARGO_MANIFEST_DIR").is_some(),
&serde_json::to_string(&$value).unwrap(),
file!(),
module_path!(),
line!(),
stringify!($value),
);
}};
}
|
{
"buck test ... -- --env INSTA_UPDATE=1"
}
|
conditional_block
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
pub fn
|
() {
const WORKSPACE: &str = "INSTA_WORKSPACE_ROOT";
const UPDATE: &str = "INSTA_UPDATE";
if std::env::var(WORKSPACE).is_err() {
let mut root = std::path::PathBuf::from(file!());
root.pop();
root.pop();
std::env::set_var(WORKSPACE, root);
}
if std::env::var(UPDATE).is_err() {
std::env::set_var(UPDATE, "no");
}
}
pub fn run(
test_name: &str,
is_cargo: bool,
snapshot: &str,
file: &str,
module: &str,
line: u32,
expr: &str,
) {
let command = if is_cargo {
"INSTA_UPDATE=1 cargo test..."
} else {
"buck test... -- --env INSTA_UPDATE=1"
};
println!(
"{:=^80}\n",
format!(" Run `{}` to update snapshots ", command)
);
let file_name = std::path::Path::new(file)
.file_name()
.and_then(|p| p.to_str())
.unwrap();
insta::_macro_support::assert_snapshot(
test_name.into(),
snapshot,
"unused",
// buck builds have a _unittest module suffix which cargo doesn't
// this makes the snapshot location consistent on both
&module.replacen("_unittest", "", 1),
file_name,
line,
expr,
)
.unwrap();
}
/// Assert that the serde json representation of given expression matches the snapshot
/// stored on disk.
#[macro_export]
macro_rules! assert_json {
($value: expr, $test_name: ident) => {{
$crate::setup();
$crate::run(
stringify!($test_name),
option_env!("CARGO_MANIFEST_DIR").is_some(),
&serde_json::to_string(&$value).unwrap(),
file!(),
module_path!(),
line!(),
stringify!($value),
);
}};
}
|
setup
|
identifier_name
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
pub fn setup()
|
pub fn run(
test_name: &str,
is_cargo: bool,
snapshot: &str,
file: &str,
module: &str,
line: u32,
expr: &str,
) {
let command = if is_cargo {
"INSTA_UPDATE=1 cargo test..."
} else {
"buck test... -- --env INSTA_UPDATE=1"
};
println!(
"{:=^80}\n",
format!(" Run `{}` to update snapshots ", command)
);
let file_name = std::path::Path::new(file)
.file_name()
.and_then(|p| p.to_str())
.unwrap();
insta::_macro_support::assert_snapshot(
test_name.into(),
snapshot,
"unused",
// buck builds have a _unittest module suffix which cargo doesn't
// this makes the snapshot location consistent on both
&module.replacen("_unittest", "", 1),
file_name,
line,
expr,
)
.unwrap();
}
/// Assert that the serde json representation of given expression matches the snapshot
/// stored on disk.
#[macro_export]
macro_rules! assert_json {
($value: expr, $test_name: ident) => {{
$crate::setup();
$crate::run(
stringify!($test_name),
option_env!("CARGO_MANIFEST_DIR").is_some(),
&serde_json::to_string(&$value).unwrap(),
file!(),
module_path!(),
line!(),
stringify!($value),
);
}};
}
|
{
const WORKSPACE: &str = "INSTA_WORKSPACE_ROOT";
const UPDATE: &str = "INSTA_UPDATE";
if std::env::var(WORKSPACE).is_err() {
let mut root = std::path::PathBuf::from(file!());
root.pop();
root.pop();
std::env::set_var(WORKSPACE, root);
}
if std::env::var(UPDATE).is_err() {
std::env::set_var(UPDATE, "no");
}
}
|
identifier_body
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
pub fn setup() {
const WORKSPACE: &str = "INSTA_WORKSPACE_ROOT";
const UPDATE: &str = "INSTA_UPDATE";
if std::env::var(WORKSPACE).is_err() {
let mut root = std::path::PathBuf::from(file!());
root.pop();
root.pop();
std::env::set_var(WORKSPACE, root);
}
if std::env::var(UPDATE).is_err() {
std::env::set_var(UPDATE, "no");
}
|
}
pub fn run(
test_name: &str,
is_cargo: bool,
snapshot: &str,
file: &str,
module: &str,
line: u32,
expr: &str,
) {
let command = if is_cargo {
"INSTA_UPDATE=1 cargo test..."
} else {
"buck test... -- --env INSTA_UPDATE=1"
};
println!(
"{:=^80}\n",
format!(" Run `{}` to update snapshots ", command)
);
let file_name = std::path::Path::new(file)
.file_name()
.and_then(|p| p.to_str())
.unwrap();
insta::_macro_support::assert_snapshot(
test_name.into(),
snapshot,
"unused",
// buck builds have a _unittest module suffix which cargo doesn't
// this makes the snapshot location consistent on both
&module.replacen("_unittest", "", 1),
file_name,
line,
expr,
)
.unwrap();
}
/// Assert that the serde json representation of given expression matches the snapshot
/// stored on disk.
#[macro_export]
macro_rules! assert_json {
($value: expr, $test_name: ident) => {{
$crate::setup();
$crate::run(
stringify!($test_name),
option_env!("CARGO_MANIFEST_DIR").is_some(),
&serde_json::to_string(&$value).unwrap(),
file!(),
module_path!(),
line!(),
stringify!($value),
);
}};
}
|
random_line_split
|
|
use_crate.rs
|
// aux-build:use_crate.rs
// aux-build:use_crate_2.rs
// build-aux-docs
// edition:2018
// compile-flags:--extern use_crate --extern use_crate_2
// During the buildup to Rust 2018, rustdoc would eagerly inline `pub use some_crate;` as if it
// were a module, so we changed it to make `pub use`ing crate roots remain as a `pub use` statement
// in docs... unless you added `#[doc(inline)]`.
#![crate_name = "local"]
// @!has-dir local/use_crate
// @has local/index.html
// @has - '//code' 'pub use use_crate'
pub use use_crate;
// @has-dir local/asdf
// @has local/asdf/index.html
// @has local/index.html '//a/@href' 'asdf/index.html'
pub use use_crate::asdf;
|
// @has-dir local/use_crate_2
// @has local/use_crate_2/index.html
// @has local/index.html '//a/@href' 'use_crate_2/index.html'
#[doc(inline)]
pub use use_crate_2;
|
random_line_split
|
|
lib.rs
|
//! This crate defines the high-level API for accessing Connected Devices.
#![feature(custom_derive, plugin, stmt_expr_attributes)]
#![plugin(serde_macros)]
#![plugin(clippy)]
// To prevent clippy being noisy with derive(...)
#![allow(used_underscore_binding)]
#[macro_use]
extern crate lazy_static;
extern crate chrono;
extern crate libc;
#[macro_use]
extern crate log;
extern crate rusqlite;
extern crate serde;
extern crate serde_json;
extern crate string_cache;
extern crate sublock;
extern crate transformable_channels;
/// Metadata on devices
pub mod services;
/// Public-facing API
pub mod api;
/// Tools for parsing from JSON.
pub mod parse;
/// Selecting one or more devices. Exposed through the API.
pub mod selector;
/// Values that may be sent to/received from devices
pub mod values;
/// Various utilities
pub mod util;
/// The back-end thread, in charge of the heavy lifting of managing adapters.
mod backend;
/// The manager provides an API for (un)registering adapters, services, channels, and
/// uses these to implements the taxonomy API.
pub mod manager;
/// The API for defining Adapters.
pub mod adapter;
/// Utilities for writing Adapters.
pub mod adapter_utils;
/// Utility module for inserting values in maps and keeping the insertion reversible in case of
/// any error.
pub mod transact;
/// Implementation of the database storing tags.
pub mod tag_storage;
/// Implementation of a fake adapter, controlled entirely programmatically. Designed to be used
/// as a component of tests.
pub mod fake_adapter;
/// Serialization and deserialization.
|
pub mod io;
|
random_line_split
|
|
main.rs
|
extern crate clap;
extern crate intcode;
// Time Start: Sun, 22 Dec 2019 09:23:38 -0500
// Time Finish 1: Sun, 22 Dec 2019 17:56:37 -0500 (8 hours, 32 minutes, 59 seconds)
// Time Finish 2:
// Time Total:
use clap::{Arg, App};
use intcode::util::{BBox,Direction,Direction::*};
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::fmt;
use std::fs;
use std::{thread, time};
pub struct Portal {
a: (i64, i64), a_dir: Direction,
b: (i64, i64), b_dir: Direction,
}
pub struct Chart {
pub map: HashMap<(i64, i64), char>,
pub bbox: BBox,
pub portals: HashMap<(i64, i64), (i64, i64)>,
}
impl Chart {
pub fn new() -> Chart {
Chart {
map: HashMap::new(),
bbox: BBox::new(),
portals: HashMap::new(),
}
}
pub fn load(fname: &str) -> Chart {
let mut chart = Chart::new();
let contents = fs::read_to_string(fname)
.unwrap_or_else(|err| panic!("Error reading {}: {}", fname, err));
for (j, line) in contents.trim_end_matches('\n').split('\n').enumerate() {
for (i, ch) in line.chars().enumerate() {
chart.put(i as i64, j as i64, ch);
}
}
return chart;
}
pub fn item_at(&self, x: i64, y: i64) -> char {
match self.map.get(&(x,y)) {
Some(x) => *x,
None =>'',
}
}
pub fn locate(&self, ch: char) -> Option<(i64, i64)> {
for (key, val) in self.map.iter() {
if *val == ch {
return Some(*key);
}
}
return None;
}
pub fn follow_portal(&self, x0: i64, y0: i64) -> (i64, i64) {
match self.portals.get(&(x0,y0)) {
Some(&(x, y)) => (x,y),
None => panic!("Tried to follow non-existant portal at ({}, {})", x0, y0),
}
}
pub fn add_portal(&mut self, nest: bool, x0: i64, y0: i64, x1: i64, y1: i64) {
let sym = if nest {
if self.near_border(x0, y0, 4) { 'v' } else { '^' }
} else { '=' };
self.put(x0, y0, sym);
self.portals.insert((x0, y0), (x1, y1));
}
fn near_border(&self, x: i64, y: i64, dist: i64) -> bool {
((x-self.bbox.xmin()).abs() <= dist || (x-self.bbox.xmax()).abs() <= dist)
||
((y-self.bbox.ymin()).abs() <= dist || (y-self.bbox.ymax()).abs() <= dist)
}
pub fn get(&self, x: i64, y: i64) -> Option<&char> { self.map.get(&(x,y)) }
pub fn put(&mut self, x: i64, y: i64, obj: char)
|
pub fn shortest_path(&self, x: i64, y: i64, valid: impl Fn(i64, i64, i64, char) -> bool, wanted: impl Fn(i64, i64, i64, char) -> bool) -> Option<(i64, i64, Vec<Direction>)> {
let mut todo = VecDeque::new();
let mut seen = HashSet::new();
todo.push_back((x, y, 0, Vec::new()));
loop {
let (x, y, level, path) = todo.pop_front()?; // or return None
let spot = self.item_at(x, y);
if wanted(x, y, level, spot) {
return Some((x, y, path));
}
if valid(x, y, level, spot) {
for dir in Direction::each() {
let (mut a, mut b) = dir.step(x, y);
let mut lvl = match self.item_at(a, b) {
'^' => 1,
'v' => -1,
'=' => 99,
_ => 0,
};
if lvl!= 0 {
let (aa, bb) = self.follow_portal(a, b);
a = aa; b = bb;
if lvl == 99 { lvl = 0; }
}
if!seen.contains(&(a, b, level+lvl)) {
let mut newpath = path.clone();
newpath.push(*dir);
todo.push_back((a, b, level+lvl, newpath));
}
}
}
seen.insert((x, y, level));
};
}
fn map_portals(&mut self, nest: bool) {
fn update_portal(pmap: &mut HashMap<String, Portal>, name: &String, x: i64, y: i64, dir: Direction) {
match pmap.get_mut(name) {
Some(port) => {
port.b = (x, y);
port.b_dir = dir;
},
None => {
pmap.insert(name.clone(), Portal {
a: (x, y), a_dir: dir,
b: (0, 0), b_dir: North,
});
}
}
}
let mut stuff = HashMap::new();
for ((x, y), ch) in self.map.iter() {
match *ch {
'A'..='Z' => {
let mut name = String::new();
name.push(*ch);
// These run left-to-right or top-to-bottom, only pay
// attention when we find when we find the left or top
// letter ().
let ch2 = self.item_at(*x+1, *y);// LR
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x+2, *y) {
'.' => update_portal(&mut stuff, &name, *x+1, *y, East), // XX.
_ => update_portal(&mut stuff, &name, *x, *y, West), //.XX
}
}
let ch2 = self.item_at(*x, *y+1);// TB
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x, *y+2) {
'.' => update_portal(&mut stuff, &name, *x, *y+1, North), // Y/Y/.
_ => update_portal(&mut stuff, &name, *x, *y, South), //./Y/Y
}
}
},
_ => (),
}
}
for (name, port) in stuff.iter() {
match name.as_str() {
"AA" | "ZZ" => { // Remove the duplicate, leave just one symbol
let (x, y) = port.a;
let (a, b) = port.a_dir.rev().step(x, y);
self.put(a, b,'');
},
_ => {
let (xa, ya) = port.a;
let (xb, yb) = port.b;
// erase second char
let (a, b) = port.a_dir.rev().step(xa, ya); self.put(a, b,'');
let (a, b) = port.b_dir.rev().step(xb, yb); self.put(a, b,'');
// Portal a -> b
let (a, b) = port.b_dir.step(xb, yb);
self.add_portal(nest, xa, ya, a, b);
// Portal b -> a
let (a, b) = port.a_dir.step(xa, ya);
self.add_portal(nest, xb, yb, a, b);
},
}
}
}
}
impl fmt::Display for Chart {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for y in (self.bbox.ymin()..=self.bbox.ymax()).rev() {
for x in self.bbox.xmin()..=self.bbox.xmax() {
write!(f, "{}", self.item_at(x, y))?;
}
write!(f, "{}", "\n")?;
}
Ok(())
}
}
fn walk(chart: &mut Chart, mut x: i64, mut y: i64, path: &Vec<Direction>) {
let mut level = 0;
let mut was;
let mut portal;
for dir in path {
let (a, b) = dir.step(x, y);
x = a; y = b;
was = chart.item_at(x, y);
portal = false;
if was == '^' { level += 1; portal = true; }
if was == 'v' { level -= 1; portal = true; }
if portal {
let (a, b) = chart.follow_portal(x, y);
x = a; y = b;
}
was = chart.item_at(x, y);
chart.put(x, y, '@');
println!("\x1B[H\x1B[2J{}\n\n Level: {}", chart, level);
thread::sleep(time::Duration::from_millis(30));
chart.put(x, y, was);
}
}
fn main() {
let matches = App::new("Advent of Code 2019, Day 20")
.arg(Arg::with_name("FILE")
.help("Input file to process")
.index(1))
.get_matches();
let fname = matches.value_of("FILE").unwrap_or("20.in");
let mut chart = Chart::load(fname);
chart.map_portals(false);
let (mut x0, mut y0) = chart.locate('A').unwrap_or_else(|| panic!("Can't find start square"));
for dir in Direction::each() {
let (x, y) = dir.step(x0, y0);
if '.' == chart.item_at(x, y) {
x0 = x; y0 = y; break;
}
}
match chart.shortest_path(x0, y0, |_,_,_,ch| ch == '.' || ch == '=', |_,_,_,ch| ch == 'Z') {
Some((x, y, path)) => println!("Part 1: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1),
None => println!("Bummer"),
}
let mut chart = Chart::load(fname);
chart.map_portals(true);
match chart.shortest_path(x0, y0, |_,_,l,ch| l >= 0 && (ch == '.' || ch == '^' || ch == 'v'), |_,_,l,ch| ch == 'Z' && l == 0) {
Some((x, y, path)) => {
// walk(&mut chart, x0, y0, &path);
println!("Part 2: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1);
},
None => println!("Bummer"),
}
}
|
{
self.bbox.update(x, y);
self.map.insert((x, y), obj);
}
|
identifier_body
|
main.rs
|
extern crate clap;
extern crate intcode;
// Time Start: Sun, 22 Dec 2019 09:23:38 -0500
// Time Finish 1: Sun, 22 Dec 2019 17:56:37 -0500 (8 hours, 32 minutes, 59 seconds)
// Time Finish 2:
// Time Total:
use clap::{Arg, App};
use intcode::util::{BBox,Direction,Direction::*};
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::fmt;
use std::fs;
use std::{thread, time};
pub struct Portal {
a: (i64, i64), a_dir: Direction,
b: (i64, i64), b_dir: Direction,
}
pub struct Chart {
pub map: HashMap<(i64, i64), char>,
pub bbox: BBox,
pub portals: HashMap<(i64, i64), (i64, i64)>,
}
impl Chart {
pub fn new() -> Chart {
Chart {
map: HashMap::new(),
bbox: BBox::new(),
portals: HashMap::new(),
}
}
pub fn load(fname: &str) -> Chart {
let mut chart = Chart::new();
let contents = fs::read_to_string(fname)
.unwrap_or_else(|err| panic!("Error reading {}: {}", fname, err));
for (j, line) in contents.trim_end_matches('\n').split('\n').enumerate() {
for (i, ch) in line.chars().enumerate() {
chart.put(i as i64, j as i64, ch);
}
}
return chart;
}
pub fn item_at(&self, x: i64, y: i64) -> char {
match self.map.get(&(x,y)) {
Some(x) => *x,
None =>'',
}
}
pub fn locate(&self, ch: char) -> Option<(i64, i64)> {
for (key, val) in self.map.iter() {
if *val == ch {
return Some(*key);
}
}
return None;
}
pub fn follow_portal(&self, x0: i64, y0: i64) -> (i64, i64) {
match self.portals.get(&(x0,y0)) {
Some(&(x, y)) => (x,y),
None => panic!("Tried to follow non-existant portal at ({}, {})", x0, y0),
}
}
pub fn add_portal(&mut self, nest: bool, x0: i64, y0: i64, x1: i64, y1: i64) {
let sym = if nest {
if self.near_border(x0, y0, 4) { 'v' } else { '^' }
} else { '=' };
self.put(x0, y0, sym);
self.portals.insert((x0, y0), (x1, y1));
}
fn near_border(&self, x: i64, y: i64, dist: i64) -> bool {
((x-self.bbox.xmin()).abs() <= dist || (x-self.bbox.xmax()).abs() <= dist)
||
((y-self.bbox.ymin()).abs() <= dist || (y-self.bbox.ymax()).abs() <= dist)
}
pub fn get(&self, x: i64, y: i64) -> Option<&char> { self.map.get(&(x,y)) }
pub fn put(&mut self, x: i64, y: i64, obj: char) {
self.bbox.update(x, y);
self.map.insert((x, y), obj);
}
pub fn shortest_path(&self, x: i64, y: i64, valid: impl Fn(i64, i64, i64, char) -> bool, wanted: impl Fn(i64, i64, i64, char) -> bool) -> Option<(i64, i64, Vec<Direction>)> {
let mut todo = VecDeque::new();
let mut seen = HashSet::new();
todo.push_back((x, y, 0, Vec::new()));
|
loop {
let (x, y, level, path) = todo.pop_front()?; // or return None
let spot = self.item_at(x, y);
if wanted(x, y, level, spot) {
return Some((x, y, path));
}
if valid(x, y, level, spot) {
for dir in Direction::each() {
let (mut a, mut b) = dir.step(x, y);
let mut lvl = match self.item_at(a, b) {
'^' => 1,
'v' => -1,
'=' => 99,
_ => 0,
};
if lvl!= 0 {
let (aa, bb) = self.follow_portal(a, b);
a = aa; b = bb;
if lvl == 99 { lvl = 0; }
}
if!seen.contains(&(a, b, level+lvl)) {
let mut newpath = path.clone();
newpath.push(*dir);
todo.push_back((a, b, level+lvl, newpath));
}
}
}
seen.insert((x, y, level));
};
}
fn map_portals(&mut self, nest: bool) {
fn update_portal(pmap: &mut HashMap<String, Portal>, name: &String, x: i64, y: i64, dir: Direction) {
match pmap.get_mut(name) {
Some(port) => {
port.b = (x, y);
port.b_dir = dir;
},
None => {
pmap.insert(name.clone(), Portal {
a: (x, y), a_dir: dir,
b: (0, 0), b_dir: North,
});
}
}
}
let mut stuff = HashMap::new();
for ((x, y), ch) in self.map.iter() {
match *ch {
'A'..='Z' => {
let mut name = String::new();
name.push(*ch);
// These run left-to-right or top-to-bottom, only pay
// attention when we find when we find the left or top
// letter ().
let ch2 = self.item_at(*x+1, *y);// LR
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x+2, *y) {
'.' => update_portal(&mut stuff, &name, *x+1, *y, East), // XX.
_ => update_portal(&mut stuff, &name, *x, *y, West), //.XX
}
}
let ch2 = self.item_at(*x, *y+1);// TB
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x, *y+2) {
'.' => update_portal(&mut stuff, &name, *x, *y+1, North), // Y/Y/.
_ => update_portal(&mut stuff, &name, *x, *y, South), //./Y/Y
}
}
},
_ => (),
}
}
for (name, port) in stuff.iter() {
match name.as_str() {
"AA" | "ZZ" => { // Remove the duplicate, leave just one symbol
let (x, y) = port.a;
let (a, b) = port.a_dir.rev().step(x, y);
self.put(a, b,'');
},
_ => {
let (xa, ya) = port.a;
let (xb, yb) = port.b;
// erase second char
let (a, b) = port.a_dir.rev().step(xa, ya); self.put(a, b,'');
let (a, b) = port.b_dir.rev().step(xb, yb); self.put(a, b,'');
// Portal a -> b
let (a, b) = port.b_dir.step(xb, yb);
self.add_portal(nest, xa, ya, a, b);
// Portal b -> a
let (a, b) = port.a_dir.step(xa, ya);
self.add_portal(nest, xb, yb, a, b);
},
}
}
}
}
impl fmt::Display for Chart {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for y in (self.bbox.ymin()..=self.bbox.ymax()).rev() {
for x in self.bbox.xmin()..=self.bbox.xmax() {
write!(f, "{}", self.item_at(x, y))?;
}
write!(f, "{}", "\n")?;
}
Ok(())
}
}
fn walk(chart: &mut Chart, mut x: i64, mut y: i64, path: &Vec<Direction>) {
let mut level = 0;
let mut was;
let mut portal;
for dir in path {
let (a, b) = dir.step(x, y);
x = a; y = b;
was = chart.item_at(x, y);
portal = false;
if was == '^' { level += 1; portal = true; }
if was == 'v' { level -= 1; portal = true; }
if portal {
let (a, b) = chart.follow_portal(x, y);
x = a; y = b;
}
was = chart.item_at(x, y);
chart.put(x, y, '@');
println!("\x1B[H\x1B[2J{}\n\n Level: {}", chart, level);
thread::sleep(time::Duration::from_millis(30));
chart.put(x, y, was);
}
}
fn main() {
let matches = App::new("Advent of Code 2019, Day 20")
.arg(Arg::with_name("FILE")
.help("Input file to process")
.index(1))
.get_matches();
let fname = matches.value_of("FILE").unwrap_or("20.in");
let mut chart = Chart::load(fname);
chart.map_portals(false);
let (mut x0, mut y0) = chart.locate('A').unwrap_or_else(|| panic!("Can't find start square"));
for dir in Direction::each() {
let (x, y) = dir.step(x0, y0);
if '.' == chart.item_at(x, y) {
x0 = x; y0 = y; break;
}
}
match chart.shortest_path(x0, y0, |_,_,_,ch| ch == '.' || ch == '=', |_,_,_,ch| ch == 'Z') {
Some((x, y, path)) => println!("Part 1: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1),
None => println!("Bummer"),
}
let mut chart = Chart::load(fname);
chart.map_portals(true);
match chart.shortest_path(x0, y0, |_,_,l,ch| l >= 0 && (ch == '.' || ch == '^' || ch == 'v'), |_,_,l,ch| ch == 'Z' && l == 0) {
Some((x, y, path)) => {
// walk(&mut chart, x0, y0, &path);
println!("Part 2: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1);
},
None => println!("Bummer"),
}
}
|
random_line_split
|
|
main.rs
|
extern crate clap;
extern crate intcode;
// Time Start: Sun, 22 Dec 2019 09:23:38 -0500
// Time Finish 1: Sun, 22 Dec 2019 17:56:37 -0500 (8 hours, 32 minutes, 59 seconds)
// Time Finish 2:
// Time Total:
use clap::{Arg, App};
use intcode::util::{BBox,Direction,Direction::*};
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::fmt;
use std::fs;
use std::{thread, time};
pub struct Portal {
a: (i64, i64), a_dir: Direction,
b: (i64, i64), b_dir: Direction,
}
pub struct Chart {
pub map: HashMap<(i64, i64), char>,
pub bbox: BBox,
pub portals: HashMap<(i64, i64), (i64, i64)>,
}
impl Chart {
pub fn new() -> Chart {
Chart {
map: HashMap::new(),
bbox: BBox::new(),
portals: HashMap::new(),
}
}
pub fn
|
(fname: &str) -> Chart {
let mut chart = Chart::new();
let contents = fs::read_to_string(fname)
.unwrap_or_else(|err| panic!("Error reading {}: {}", fname, err));
for (j, line) in contents.trim_end_matches('\n').split('\n').enumerate() {
for (i, ch) in line.chars().enumerate() {
chart.put(i as i64, j as i64, ch);
}
}
return chart;
}
pub fn item_at(&self, x: i64, y: i64) -> char {
match self.map.get(&(x,y)) {
Some(x) => *x,
None =>'',
}
}
pub fn locate(&self, ch: char) -> Option<(i64, i64)> {
for (key, val) in self.map.iter() {
if *val == ch {
return Some(*key);
}
}
return None;
}
pub fn follow_portal(&self, x0: i64, y0: i64) -> (i64, i64) {
match self.portals.get(&(x0,y0)) {
Some(&(x, y)) => (x,y),
None => panic!("Tried to follow non-existant portal at ({}, {})", x0, y0),
}
}
pub fn add_portal(&mut self, nest: bool, x0: i64, y0: i64, x1: i64, y1: i64) {
let sym = if nest {
if self.near_border(x0, y0, 4) { 'v' } else { '^' }
} else { '=' };
self.put(x0, y0, sym);
self.portals.insert((x0, y0), (x1, y1));
}
fn near_border(&self, x: i64, y: i64, dist: i64) -> bool {
((x-self.bbox.xmin()).abs() <= dist || (x-self.bbox.xmax()).abs() <= dist)
||
((y-self.bbox.ymin()).abs() <= dist || (y-self.bbox.ymax()).abs() <= dist)
}
pub fn get(&self, x: i64, y: i64) -> Option<&char> { self.map.get(&(x,y)) }
pub fn put(&mut self, x: i64, y: i64, obj: char) {
self.bbox.update(x, y);
self.map.insert((x, y), obj);
}
pub fn shortest_path(&self, x: i64, y: i64, valid: impl Fn(i64, i64, i64, char) -> bool, wanted: impl Fn(i64, i64, i64, char) -> bool) -> Option<(i64, i64, Vec<Direction>)> {
let mut todo = VecDeque::new();
let mut seen = HashSet::new();
todo.push_back((x, y, 0, Vec::new()));
loop {
let (x, y, level, path) = todo.pop_front()?; // or return None
let spot = self.item_at(x, y);
if wanted(x, y, level, spot) {
return Some((x, y, path));
}
if valid(x, y, level, spot) {
for dir in Direction::each() {
let (mut a, mut b) = dir.step(x, y);
let mut lvl = match self.item_at(a, b) {
'^' => 1,
'v' => -1,
'=' => 99,
_ => 0,
};
if lvl!= 0 {
let (aa, bb) = self.follow_portal(a, b);
a = aa; b = bb;
if lvl == 99 { lvl = 0; }
}
if!seen.contains(&(a, b, level+lvl)) {
let mut newpath = path.clone();
newpath.push(*dir);
todo.push_back((a, b, level+lvl, newpath));
}
}
}
seen.insert((x, y, level));
};
}
fn map_portals(&mut self, nest: bool) {
fn update_portal(pmap: &mut HashMap<String, Portal>, name: &String, x: i64, y: i64, dir: Direction) {
match pmap.get_mut(name) {
Some(port) => {
port.b = (x, y);
port.b_dir = dir;
},
None => {
pmap.insert(name.clone(), Portal {
a: (x, y), a_dir: dir,
b: (0, 0), b_dir: North,
});
}
}
}
let mut stuff = HashMap::new();
for ((x, y), ch) in self.map.iter() {
match *ch {
'A'..='Z' => {
let mut name = String::new();
name.push(*ch);
// These run left-to-right or top-to-bottom, only pay
// attention when we find when we find the left or top
// letter ().
let ch2 = self.item_at(*x+1, *y);// LR
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x+2, *y) {
'.' => update_portal(&mut stuff, &name, *x+1, *y, East), // XX.
_ => update_portal(&mut stuff, &name, *x, *y, West), //.XX
}
}
let ch2 = self.item_at(*x, *y+1);// TB
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x, *y+2) {
'.' => update_portal(&mut stuff, &name, *x, *y+1, North), // Y/Y/.
_ => update_portal(&mut stuff, &name, *x, *y, South), //./Y/Y
}
}
},
_ => (),
}
}
for (name, port) in stuff.iter() {
match name.as_str() {
"AA" | "ZZ" => { // Remove the duplicate, leave just one symbol
let (x, y) = port.a;
let (a, b) = port.a_dir.rev().step(x, y);
self.put(a, b,'');
},
_ => {
let (xa, ya) = port.a;
let (xb, yb) = port.b;
// erase second char
let (a, b) = port.a_dir.rev().step(xa, ya); self.put(a, b,'');
let (a, b) = port.b_dir.rev().step(xb, yb); self.put(a, b,'');
// Portal a -> b
let (a, b) = port.b_dir.step(xb, yb);
self.add_portal(nest, xa, ya, a, b);
// Portal b -> a
let (a, b) = port.a_dir.step(xa, ya);
self.add_portal(nest, xb, yb, a, b);
},
}
}
}
}
impl fmt::Display for Chart {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for y in (self.bbox.ymin()..=self.bbox.ymax()).rev() {
for x in self.bbox.xmin()..=self.bbox.xmax() {
write!(f, "{}", self.item_at(x, y))?;
}
write!(f, "{}", "\n")?;
}
Ok(())
}
}
fn walk(chart: &mut Chart, mut x: i64, mut y: i64, path: &Vec<Direction>) {
let mut level = 0;
let mut was;
let mut portal;
for dir in path {
let (a, b) = dir.step(x, y);
x = a; y = b;
was = chart.item_at(x, y);
portal = false;
if was == '^' { level += 1; portal = true; }
if was == 'v' { level -= 1; portal = true; }
if portal {
let (a, b) = chart.follow_portal(x, y);
x = a; y = b;
}
was = chart.item_at(x, y);
chart.put(x, y, '@');
println!("\x1B[H\x1B[2J{}\n\n Level: {}", chart, level);
thread::sleep(time::Duration::from_millis(30));
chart.put(x, y, was);
}
}
fn main() {
let matches = App::new("Advent of Code 2019, Day 20")
.arg(Arg::with_name("FILE")
.help("Input file to process")
.index(1))
.get_matches();
let fname = matches.value_of("FILE").unwrap_or("20.in");
let mut chart = Chart::load(fname);
chart.map_portals(false);
let (mut x0, mut y0) = chart.locate('A').unwrap_or_else(|| panic!("Can't find start square"));
for dir in Direction::each() {
let (x, y) = dir.step(x0, y0);
if '.' == chart.item_at(x, y) {
x0 = x; y0 = y; break;
}
}
match chart.shortest_path(x0, y0, |_,_,_,ch| ch == '.' || ch == '=', |_,_,_,ch| ch == 'Z') {
Some((x, y, path)) => println!("Part 1: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1),
None => println!("Bummer"),
}
let mut chart = Chart::load(fname);
chart.map_portals(true);
match chart.shortest_path(x0, y0, |_,_,l,ch| l >= 0 && (ch == '.' || ch == '^' || ch == 'v'), |_,_,l,ch| ch == 'Z' && l == 0) {
Some((x, y, path)) => {
// walk(&mut chart, x0, y0, &path);
println!("Part 2: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1);
},
None => println!("Bummer"),
}
}
|
load
|
identifier_name
|
main.rs
|
extern crate clap;
extern crate intcode;
// Time Start: Sun, 22 Dec 2019 09:23:38 -0500
// Time Finish 1: Sun, 22 Dec 2019 17:56:37 -0500 (8 hours, 32 minutes, 59 seconds)
// Time Finish 2:
// Time Total:
use clap::{Arg, App};
use intcode::util::{BBox,Direction,Direction::*};
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::fmt;
use std::fs;
use std::{thread, time};
pub struct Portal {
a: (i64, i64), a_dir: Direction,
b: (i64, i64), b_dir: Direction,
}
pub struct Chart {
pub map: HashMap<(i64, i64), char>,
pub bbox: BBox,
pub portals: HashMap<(i64, i64), (i64, i64)>,
}
impl Chart {
pub fn new() -> Chart {
Chart {
map: HashMap::new(),
bbox: BBox::new(),
portals: HashMap::new(),
}
}
pub fn load(fname: &str) -> Chart {
let mut chart = Chart::new();
let contents = fs::read_to_string(fname)
.unwrap_or_else(|err| panic!("Error reading {}: {}", fname, err));
for (j, line) in contents.trim_end_matches('\n').split('\n').enumerate() {
for (i, ch) in line.chars().enumerate() {
chart.put(i as i64, j as i64, ch);
}
}
return chart;
}
pub fn item_at(&self, x: i64, y: i64) -> char {
match self.map.get(&(x,y)) {
Some(x) => *x,
None =>'',
}
}
pub fn locate(&self, ch: char) -> Option<(i64, i64)> {
for (key, val) in self.map.iter() {
if *val == ch {
return Some(*key);
}
}
return None;
}
pub fn follow_portal(&self, x0: i64, y0: i64) -> (i64, i64) {
match self.portals.get(&(x0,y0)) {
Some(&(x, y)) => (x,y),
None => panic!("Tried to follow non-existant portal at ({}, {})", x0, y0),
}
}
pub fn add_portal(&mut self, nest: bool, x0: i64, y0: i64, x1: i64, y1: i64) {
let sym = if nest {
if self.near_border(x0, y0, 4) { 'v' } else { '^' }
} else
|
;
self.put(x0, y0, sym);
self.portals.insert((x0, y0), (x1, y1));
}
fn near_border(&self, x: i64, y: i64, dist: i64) -> bool {
((x-self.bbox.xmin()).abs() <= dist || (x-self.bbox.xmax()).abs() <= dist)
||
((y-self.bbox.ymin()).abs() <= dist || (y-self.bbox.ymax()).abs() <= dist)
}
pub fn get(&self, x: i64, y: i64) -> Option<&char> { self.map.get(&(x,y)) }
pub fn put(&mut self, x: i64, y: i64, obj: char) {
self.bbox.update(x, y);
self.map.insert((x, y), obj);
}
pub fn shortest_path(&self, x: i64, y: i64, valid: impl Fn(i64, i64, i64, char) -> bool, wanted: impl Fn(i64, i64, i64, char) -> bool) -> Option<(i64, i64, Vec<Direction>)> {
let mut todo = VecDeque::new();
let mut seen = HashSet::new();
todo.push_back((x, y, 0, Vec::new()));
loop {
let (x, y, level, path) = todo.pop_front()?; // or return None
let spot = self.item_at(x, y);
if wanted(x, y, level, spot) {
return Some((x, y, path));
}
if valid(x, y, level, spot) {
for dir in Direction::each() {
let (mut a, mut b) = dir.step(x, y);
let mut lvl = match self.item_at(a, b) {
'^' => 1,
'v' => -1,
'=' => 99,
_ => 0,
};
if lvl!= 0 {
let (aa, bb) = self.follow_portal(a, b);
a = aa; b = bb;
if lvl == 99 { lvl = 0; }
}
if!seen.contains(&(a, b, level+lvl)) {
let mut newpath = path.clone();
newpath.push(*dir);
todo.push_back((a, b, level+lvl, newpath));
}
}
}
seen.insert((x, y, level));
};
}
fn map_portals(&mut self, nest: bool) {
fn update_portal(pmap: &mut HashMap<String, Portal>, name: &String, x: i64, y: i64, dir: Direction) {
match pmap.get_mut(name) {
Some(port) => {
port.b = (x, y);
port.b_dir = dir;
},
None => {
pmap.insert(name.clone(), Portal {
a: (x, y), a_dir: dir,
b: (0, 0), b_dir: North,
});
}
}
}
let mut stuff = HashMap::new();
for ((x, y), ch) in self.map.iter() {
match *ch {
'A'..='Z' => {
let mut name = String::new();
name.push(*ch);
// These run left-to-right or top-to-bottom, only pay
// attention when we find when we find the left or top
// letter ().
let ch2 = self.item_at(*x+1, *y);// LR
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x+2, *y) {
'.' => update_portal(&mut stuff, &name, *x+1, *y, East), // XX.
_ => update_portal(&mut stuff, &name, *x, *y, West), //.XX
}
}
let ch2 = self.item_at(*x, *y+1);// TB
if 65 <= ch2 as u8 && ch2 as u8 <= 90 {
name.push(ch2);
match self.item_at(*x, *y+2) {
'.' => update_portal(&mut stuff, &name, *x, *y+1, North), // Y/Y/.
_ => update_portal(&mut stuff, &name, *x, *y, South), //./Y/Y
}
}
},
_ => (),
}
}
for (name, port) in stuff.iter() {
match name.as_str() {
"AA" | "ZZ" => { // Remove the duplicate, leave just one symbol
let (x, y) = port.a;
let (a, b) = port.a_dir.rev().step(x, y);
self.put(a, b,'');
},
_ => {
let (xa, ya) = port.a;
let (xb, yb) = port.b;
// erase second char
let (a, b) = port.a_dir.rev().step(xa, ya); self.put(a, b,'');
let (a, b) = port.b_dir.rev().step(xb, yb); self.put(a, b,'');
// Portal a -> b
let (a, b) = port.b_dir.step(xb, yb);
self.add_portal(nest, xa, ya, a, b);
// Portal b -> a
let (a, b) = port.a_dir.step(xa, ya);
self.add_portal(nest, xb, yb, a, b);
},
}
}
}
}
impl fmt::Display for Chart {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for y in (self.bbox.ymin()..=self.bbox.ymax()).rev() {
for x in self.bbox.xmin()..=self.bbox.xmax() {
write!(f, "{}", self.item_at(x, y))?;
}
write!(f, "{}", "\n")?;
}
Ok(())
}
}
fn walk(chart: &mut Chart, mut x: i64, mut y: i64, path: &Vec<Direction>) {
let mut level = 0;
let mut was;
let mut portal;
for dir in path {
let (a, b) = dir.step(x, y);
x = a; y = b;
was = chart.item_at(x, y);
portal = false;
if was == '^' { level += 1; portal = true; }
if was == 'v' { level -= 1; portal = true; }
if portal {
let (a, b) = chart.follow_portal(x, y);
x = a; y = b;
}
was = chart.item_at(x, y);
chart.put(x, y, '@');
println!("\x1B[H\x1B[2J{}\n\n Level: {}", chart, level);
thread::sleep(time::Duration::from_millis(30));
chart.put(x, y, was);
}
}
fn main() {
let matches = App::new("Advent of Code 2019, Day 20")
.arg(Arg::with_name("FILE")
.help("Input file to process")
.index(1))
.get_matches();
let fname = matches.value_of("FILE").unwrap_or("20.in");
let mut chart = Chart::load(fname);
chart.map_portals(false);
let (mut x0, mut y0) = chart.locate('A').unwrap_or_else(|| panic!("Can't find start square"));
for dir in Direction::each() {
let (x, y) = dir.step(x0, y0);
if '.' == chart.item_at(x, y) {
x0 = x; y0 = y; break;
}
}
match chart.shortest_path(x0, y0, |_,_,_,ch| ch == '.' || ch == '=', |_,_,_,ch| ch == 'Z') {
Some((x, y, path)) => println!("Part 1: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1),
None => println!("Bummer"),
}
let mut chart = Chart::load(fname);
chart.map_portals(true);
match chart.shortest_path(x0, y0, |_,_,l,ch| l >= 0 && (ch == '.' || ch == '^' || ch == 'v'), |_,_,l,ch| ch == 'Z' && l == 0) {
Some((x, y, path)) => {
// walk(&mut chart, x0, y0, &path);
println!("Part 2: Shortest path from ({}, {}) to ({}, {}) = {}", x0, y0, x, y, path.len()-1);
},
None => println!("Bummer"),
}
}
|
{ '=' }
|
conditional_block
|
cond_chain.rs
|
use super::*;
use super::super::sim::*;
/// Condition list in the form of an AST node
///
/// This is a reimplementation of `CondList`, as an enum with chained
/// nodes. This node has the same expressiveness as the `CondList`,
/// but has the advantage that it _can_ do crossover splicing between
/// parents in the current evolutionary framework.
///
/// A downside of this node type is that the programs tend to grow
/// without bounds, as there is no height control on the crossover
/// operation and crossovers tend to be done most of the time. This
/// leads to a population that quickly grows outside bounds, reducing
/// evolutionary and computational performance.
#[derive(Debug,RustcDecodable,RustcEncodable,Clone,PartialEq)]
pub enum CondChain {
Cond(Box<Condition>, Box<Command>, Box<CondChain>),
Stop
}
impl_astnode!(CondChain, 5,
int Cond(cond, then, next),
leaf Stop());
impl EvaluateToCommand for CondChain {
fn
|
(&self, sensor_data: &SensorData) -> Command {
match *self {
CondChain::Cond(ref cond, ref then, ref next) => {
if cond.is_true(sensor_data) {
**then
} else {
next.evaluate(sensor_data)
}
},
CondChain::Stop => Command::Skip
}
}
}
|
evaluate
|
identifier_name
|
cond_chain.rs
|
use super::*;
use super::super::sim::*;
/// Condition list in the form of an AST node
///
/// This is a reimplementation of `CondList`, as an enum with chained
/// nodes. This node has the same expressiveness as the `CondList`,
/// but has the advantage that it _can_ do crossover splicing between
/// parents in the current evolutionary framework.
///
/// A downside of this node type is that the programs tend to grow
/// without bounds, as there is no height control on the crossover
/// operation and crossovers tend to be done most of the time. This
/// leads to a population that quickly grows outside bounds, reducing
/// evolutionary and computational performance.
#[derive(Debug,RustcDecodable,RustcEncodable,Clone,PartialEq)]
pub enum CondChain {
Cond(Box<Condition>, Box<Command>, Box<CondChain>),
Stop
}
impl_astnode!(CondChain, 5,
int Cond(cond, then, next),
leaf Stop());
impl EvaluateToCommand for CondChain {
fn evaluate(&self, sensor_data: &SensorData) -> Command {
match *self {
CondChain::Cond(ref cond, ref then, ref next) => {
if cond.is_true(sensor_data) {
**then
} else
|
},
CondChain::Stop => Command::Skip
}
}
}
|
{
next.evaluate(sensor_data)
}
|
conditional_block
|
cond_chain.rs
|
use super::*;
use super::super::sim::*;
|
/// but has the advantage that it _can_ do crossover splicing between
/// parents in the current evolutionary framework.
///
/// A downside of this node type is that the programs tend to grow
/// without bounds, as there is no height control on the crossover
/// operation and crossovers tend to be done most of the time. This
/// leads to a population that quickly grows outside bounds, reducing
/// evolutionary and computational performance.
#[derive(Debug,RustcDecodable,RustcEncodable,Clone,PartialEq)]
pub enum CondChain {
Cond(Box<Condition>, Box<Command>, Box<CondChain>),
Stop
}
impl_astnode!(CondChain, 5,
int Cond(cond, then, next),
leaf Stop());
impl EvaluateToCommand for CondChain {
fn evaluate(&self, sensor_data: &SensorData) -> Command {
match *self {
CondChain::Cond(ref cond, ref then, ref next) => {
if cond.is_true(sensor_data) {
**then
} else {
next.evaluate(sensor_data)
}
},
CondChain::Stop => Command::Skip
}
}
}
|
/// Condition list in the form of an AST node
///
/// This is a reimplementation of `CondList`, as an enum with chained
/// nodes. This node has the same expressiveness as the `CondList`,
|
random_line_split
|
partition_slice.rs
|
//! Rearrange the elements in a slice according to a predicate.
/// Rearrange the elements of the mutable slice `s` such that elements where `p(t)` is true precede
/// the elements where `p(t)` is false.
///
/// The order of elements is not preserved, unless the slice is already partitioned.
///
/// Returns the number of elements where `p(t)` is true.
pub fn partition_slice<T, F>(s: &mut [T], mut p: F) -> usize
where
F: FnMut(&T) -> bool,
{
// Count the length of the prefix where `p` returns true.
let mut count = match s.iter().position(|t|!p(t)) {
Some(t) => t,
None => return s.len(),
};
// Swap remaining `true` elements into place.
//
// This actually preserves the order of the `true` elements, but the `false` elements get
// shuffled.
for i in count + 1..s.len() {
if p(&s[i]) {
s.swap(count, i);
count += 1;
}
}
count
}
#[cfg(test)]
mod tests {
use super::partition_slice;
use std::vec::Vec;
fn check(x: &[u32], want: &[u32]) {
assert_eq!(x.len(), want.len());
let want_count = want.iter().cloned().filter(|&x| x % 10 == 0).count();
let mut v = Vec::new();
v.extend(x.iter().cloned());
let count = partition_slice(&mut v[..], |&x| x % 10 == 0);
assert_eq!(v, want);
assert_eq!(count, want_count);
}
#[test]
fn empty() {
check(&[], &[]);
}
#[test]
fn singles() {
check(&[0], &[0]);
check(&[1], &[1]);
check(&[10], &[10]);
}
#[test]
fn doubles()
|
#[test]
fn longer() {
check(&[1, 2, 3], &[1, 2, 3]);
check(&[1, 2, 10], &[10, 2, 1]); // Note: 2, 1 order not required.
check(&[1, 10, 2], &[10, 1, 2]); // Note: 1, 2 order not required.
check(&[1, 20, 10], &[20, 10, 1]);
check(&[1, 20, 3, 10], &[20, 10, 3, 1]);
check(&[20, 3, 10, 1], &[20, 10, 3, 1]);
}
}
|
{
check(&[0, 0], &[0, 0]);
check(&[0, 5], &[0, 5]);
check(&[5, 0], &[0, 5]);
check(&[5, 4], &[5, 4]);
}
|
identifier_body
|
partition_slice.rs
|
//! Rearrange the elements in a slice according to a predicate.
/// Rearrange the elements of the mutable slice `s` such that elements where `p(t)` is true precede
/// the elements where `p(t)` is false.
///
/// The order of elements is not preserved, unless the slice is already partitioned.
///
/// Returns the number of elements where `p(t)` is true.
pub fn
|
<T, F>(s: &mut [T], mut p: F) -> usize
where
F: FnMut(&T) -> bool,
{
// Count the length of the prefix where `p` returns true.
let mut count = match s.iter().position(|t|!p(t)) {
Some(t) => t,
None => return s.len(),
};
// Swap remaining `true` elements into place.
//
// This actually preserves the order of the `true` elements, but the `false` elements get
// shuffled.
for i in count + 1..s.len() {
if p(&s[i]) {
s.swap(count, i);
count += 1;
}
}
count
}
#[cfg(test)]
mod tests {
use super::partition_slice;
use std::vec::Vec;
fn check(x: &[u32], want: &[u32]) {
assert_eq!(x.len(), want.len());
let want_count = want.iter().cloned().filter(|&x| x % 10 == 0).count();
let mut v = Vec::new();
v.extend(x.iter().cloned());
let count = partition_slice(&mut v[..], |&x| x % 10 == 0);
assert_eq!(v, want);
assert_eq!(count, want_count);
}
#[test]
fn empty() {
check(&[], &[]);
}
#[test]
fn singles() {
check(&[0], &[0]);
check(&[1], &[1]);
check(&[10], &[10]);
}
#[test]
fn doubles() {
check(&[0, 0], &[0, 0]);
check(&[0, 5], &[0, 5]);
check(&[5, 0], &[0, 5]);
check(&[5, 4], &[5, 4]);
}
#[test]
fn longer() {
check(&[1, 2, 3], &[1, 2, 3]);
check(&[1, 2, 10], &[10, 2, 1]); // Note: 2, 1 order not required.
check(&[1, 10, 2], &[10, 1, 2]); // Note: 1, 2 order not required.
check(&[1, 20, 10], &[20, 10, 1]);
check(&[1, 20, 3, 10], &[20, 10, 3, 1]);
check(&[20, 3, 10, 1], &[20, 10, 3, 1]);
}
}
|
partition_slice
|
identifier_name
|
partition_slice.rs
|
//! Rearrange the elements in a slice according to a predicate.
/// Rearrange the elements of the mutable slice `s` such that elements where `p(t)` is true precede
/// the elements where `p(t)` is false.
///
/// The order of elements is not preserved, unless the slice is already partitioned.
///
/// Returns the number of elements where `p(t)` is true.
pub fn partition_slice<T, F>(s: &mut [T], mut p: F) -> usize
where
F: FnMut(&T) -> bool,
{
// Count the length of the prefix where `p` returns true.
let mut count = match s.iter().position(|t|!p(t)) {
Some(t) => t,
None => return s.len(),
};
// Swap remaining `true` elements into place.
//
|
// shuffled.
for i in count + 1..s.len() {
if p(&s[i]) {
s.swap(count, i);
count += 1;
}
}
count
}
#[cfg(test)]
mod tests {
use super::partition_slice;
use std::vec::Vec;
fn check(x: &[u32], want: &[u32]) {
assert_eq!(x.len(), want.len());
let want_count = want.iter().cloned().filter(|&x| x % 10 == 0).count();
let mut v = Vec::new();
v.extend(x.iter().cloned());
let count = partition_slice(&mut v[..], |&x| x % 10 == 0);
assert_eq!(v, want);
assert_eq!(count, want_count);
}
#[test]
fn empty() {
check(&[], &[]);
}
#[test]
fn singles() {
check(&[0], &[0]);
check(&[1], &[1]);
check(&[10], &[10]);
}
#[test]
fn doubles() {
check(&[0, 0], &[0, 0]);
check(&[0, 5], &[0, 5]);
check(&[5, 0], &[0, 5]);
check(&[5, 4], &[5, 4]);
}
#[test]
fn longer() {
check(&[1, 2, 3], &[1, 2, 3]);
check(&[1, 2, 10], &[10, 2, 1]); // Note: 2, 1 order not required.
check(&[1, 10, 2], &[10, 1, 2]); // Note: 1, 2 order not required.
check(&[1, 20, 10], &[20, 10, 1]);
check(&[1, 20, 3, 10], &[20, 10, 3, 1]);
check(&[20, 3, 10, 1], &[20, 10, 3, 1]);
}
}
|
// This actually preserves the order of the `true` elements, but the `false` elements get
|
random_line_split
|
continued_fraction_from_rational.rs
|
// http://rosettacode.org/wiki/Continued_fraction/Arithmetic/Construct_from_rational_number
struct
|
{
n1: i64,
n2: i64,
}
// This iterator generates the continued fraction representation from the
// specified rational number.
impl Iterator for R2cf {
type Item = i64;
fn next(&mut self) -> Option<i64> {
if self.n2 == 0 {
None
} else {
let t1 = self.n1 / self.n2;
let t2 = self.n2;
self.n2 = self.n1 - t1 * t2;
self.n1 = t2;
Some(t1)
}
}
}
fn r2cf(n1: i64, n2: i64) -> R2cf {
R2cf { n1: n1, n2: n2 }
}
macro_rules! printcf {
($x:expr, $y:expr) => (println!("{:?}", r2cf($x, $y).collect::<Vec<_>>()));
}
fn main() {
printcf!(1, 2);
printcf!(3, 1);
printcf!(23, 8);
printcf!(13, 11);
printcf!(22, 7);
printcf!(-152, 77);
printcf!(14_142, 10_000);
printcf!(141_421, 100_000);
printcf!(1_414_214, 1_000_000);
printcf!(14_142_136, 10_000_000);
printcf!(31, 10);
printcf!(314, 100);
printcf!(3142, 1000);
printcf!(31_428, 10_000);
printcf!(314_285, 100_000);
printcf!(3_142_857, 1_000_000);
printcf!(31_428_571, 10_000_000);
printcf!(314_285_714, 100_000_000);
}
#[cfg(test)]
mod tests {
use std::iter::Iterator;
use super::r2cf;
#[test]
fn test_misc() {
assert!(Iterator::eq(r2cf(-151, 77), vec![-1, -1, -24, -1, -2]));
assert!(Iterator::eq(r2cf(22, 7), vec![3, 7]));
assert!(Iterator::eq(r2cf(23, 8), vec![2, 1, 7]));
}
#[test]
fn test_sqrt2() {
assert!(Iterator::eq(r2cf(14_142, 10_000), vec![1, 2, 2, 2, 2, 2, 1, 1, 29]));
assert!(Iterator::eq(r2cf(14_142_136, 10_000_000),
vec![1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 1, 2, 4, 1, 1, 2]));
}
#[test]
fn test_pi() {
assert!(Iterator::eq(r2cf(31, 10), vec![3, 10]));
assert!(Iterator::eq(r2cf(314, 100), vec![3, 7, 7]));
assert!(Iterator::eq(r2cf(3_142, 1_000), vec![3, 7, 23, 1, 2]));
}
}
|
R2cf
|
identifier_name
|
continued_fraction_from_rational.rs
|
// http://rosettacode.org/wiki/Continued_fraction/Arithmetic/Construct_from_rational_number
struct R2cf {
n1: i64,
n2: i64,
}
// This iterator generates the continued fraction representation from the
// specified rational number.
impl Iterator for R2cf {
type Item = i64;
fn next(&mut self) -> Option<i64> {
if self.n2 == 0 {
None
} else {
let t1 = self.n1 / self.n2;
let t2 = self.n2;
|
}
fn r2cf(n1: i64, n2: i64) -> R2cf {
R2cf { n1: n1, n2: n2 }
}
macro_rules! printcf {
($x:expr, $y:expr) => (println!("{:?}", r2cf($x, $y).collect::<Vec<_>>()));
}
fn main() {
printcf!(1, 2);
printcf!(3, 1);
printcf!(23, 8);
printcf!(13, 11);
printcf!(22, 7);
printcf!(-152, 77);
printcf!(14_142, 10_000);
printcf!(141_421, 100_000);
printcf!(1_414_214, 1_000_000);
printcf!(14_142_136, 10_000_000);
printcf!(31, 10);
printcf!(314, 100);
printcf!(3142, 1000);
printcf!(31_428, 10_000);
printcf!(314_285, 100_000);
printcf!(3_142_857, 1_000_000);
printcf!(31_428_571, 10_000_000);
printcf!(314_285_714, 100_000_000);
}
#[cfg(test)]
mod tests {
use std::iter::Iterator;
use super::r2cf;
#[test]
fn test_misc() {
assert!(Iterator::eq(r2cf(-151, 77), vec![-1, -1, -24, -1, -2]));
assert!(Iterator::eq(r2cf(22, 7), vec![3, 7]));
assert!(Iterator::eq(r2cf(23, 8), vec![2, 1, 7]));
}
#[test]
fn test_sqrt2() {
assert!(Iterator::eq(r2cf(14_142, 10_000), vec![1, 2, 2, 2, 2, 2, 1, 1, 29]));
assert!(Iterator::eq(r2cf(14_142_136, 10_000_000),
vec![1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 1, 2, 4, 1, 1, 2]));
}
#[test]
fn test_pi() {
assert!(Iterator::eq(r2cf(31, 10), vec![3, 10]));
assert!(Iterator::eq(r2cf(314, 100), vec![3, 7, 7]));
assert!(Iterator::eq(r2cf(3_142, 1_000), vec![3, 7, 23, 1, 2]));
}
}
|
self.n2 = self.n1 - t1 * t2;
self.n1 = t2;
Some(t1)
}
}
|
random_line_split
|
continued_fraction_from_rational.rs
|
// http://rosettacode.org/wiki/Continued_fraction/Arithmetic/Construct_from_rational_number
struct R2cf {
n1: i64,
n2: i64,
}
// This iterator generates the continued fraction representation from the
// specified rational number.
impl Iterator for R2cf {
type Item = i64;
fn next(&mut self) -> Option<i64> {
if self.n2 == 0 {
None
} else {
let t1 = self.n1 / self.n2;
let t2 = self.n2;
self.n2 = self.n1 - t1 * t2;
self.n1 = t2;
Some(t1)
}
}
}
fn r2cf(n1: i64, n2: i64) -> R2cf
|
macro_rules! printcf {
($x:expr, $y:expr) => (println!("{:?}", r2cf($x, $y).collect::<Vec<_>>()));
}
fn main() {
printcf!(1, 2);
printcf!(3, 1);
printcf!(23, 8);
printcf!(13, 11);
printcf!(22, 7);
printcf!(-152, 77);
printcf!(14_142, 10_000);
printcf!(141_421, 100_000);
printcf!(1_414_214, 1_000_000);
printcf!(14_142_136, 10_000_000);
printcf!(31, 10);
printcf!(314, 100);
printcf!(3142, 1000);
printcf!(31_428, 10_000);
printcf!(314_285, 100_000);
printcf!(3_142_857, 1_000_000);
printcf!(31_428_571, 10_000_000);
printcf!(314_285_714, 100_000_000);
}
#[cfg(test)]
mod tests {
use std::iter::Iterator;
use super::r2cf;
#[test]
fn test_misc() {
assert!(Iterator::eq(r2cf(-151, 77), vec![-1, -1, -24, -1, -2]));
assert!(Iterator::eq(r2cf(22, 7), vec![3, 7]));
assert!(Iterator::eq(r2cf(23, 8), vec![2, 1, 7]));
}
#[test]
fn test_sqrt2() {
assert!(Iterator::eq(r2cf(14_142, 10_000), vec![1, 2, 2, 2, 2, 2, 1, 1, 29]));
assert!(Iterator::eq(r2cf(14_142_136, 10_000_000),
vec![1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 1, 2, 4, 1, 1, 2]));
}
#[test]
fn test_pi() {
assert!(Iterator::eq(r2cf(31, 10), vec![3, 10]));
assert!(Iterator::eq(r2cf(314, 100), vec![3, 7, 7]));
assert!(Iterator::eq(r2cf(3_142, 1_000), vec![3, 7, 23, 1, 2]));
}
}
|
{
R2cf { n1: n1, n2: n2 }
}
|
identifier_body
|
continued_fraction_from_rational.rs
|
// http://rosettacode.org/wiki/Continued_fraction/Arithmetic/Construct_from_rational_number
struct R2cf {
n1: i64,
n2: i64,
}
// This iterator generates the continued fraction representation from the
// specified rational number.
impl Iterator for R2cf {
type Item = i64;
fn next(&mut self) -> Option<i64> {
if self.n2 == 0
|
else {
let t1 = self.n1 / self.n2;
let t2 = self.n2;
self.n2 = self.n1 - t1 * t2;
self.n1 = t2;
Some(t1)
}
}
}
fn r2cf(n1: i64, n2: i64) -> R2cf {
R2cf { n1: n1, n2: n2 }
}
macro_rules! printcf {
($x:expr, $y:expr) => (println!("{:?}", r2cf($x, $y).collect::<Vec<_>>()));
}
fn main() {
printcf!(1, 2);
printcf!(3, 1);
printcf!(23, 8);
printcf!(13, 11);
printcf!(22, 7);
printcf!(-152, 77);
printcf!(14_142, 10_000);
printcf!(141_421, 100_000);
printcf!(1_414_214, 1_000_000);
printcf!(14_142_136, 10_000_000);
printcf!(31, 10);
printcf!(314, 100);
printcf!(3142, 1000);
printcf!(31_428, 10_000);
printcf!(314_285, 100_000);
printcf!(3_142_857, 1_000_000);
printcf!(31_428_571, 10_000_000);
printcf!(314_285_714, 100_000_000);
}
#[cfg(test)]
mod tests {
use std::iter::Iterator;
use super::r2cf;
#[test]
fn test_misc() {
assert!(Iterator::eq(r2cf(-151, 77), vec![-1, -1, -24, -1, -2]));
assert!(Iterator::eq(r2cf(22, 7), vec![3, 7]));
assert!(Iterator::eq(r2cf(23, 8), vec![2, 1, 7]));
}
#[test]
fn test_sqrt2() {
assert!(Iterator::eq(r2cf(14_142, 10_000), vec![1, 2, 2, 2, 2, 2, 1, 1, 29]));
assert!(Iterator::eq(r2cf(14_142_136, 10_000_000),
vec![1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 1, 2, 4, 1, 1, 2]));
}
#[test]
fn test_pi() {
assert!(Iterator::eq(r2cf(31, 10), vec![3, 10]));
assert!(Iterator::eq(r2cf(314, 100), vec![3, 7, 7]));
assert!(Iterator::eq(r2cf(3_142, 1_000), vec![3, 7, 23, 1, 2]));
}
}
|
{
None
}
|
conditional_block
|
near_parabolic.rs
|
/*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
extern crate astro;
use astro::*;
#[test]
fn true_anom_and_rad_vec()
|
{
let (tru_anom, rad_vec) = orbit::near_parabolic::true_anom_and_rad_vec (
138.4783, 0.0, 1.0, 0.921326, 0.0000001
).unwrap();
assert_eq!(util::round_upto_digits(tru_anom.to_degrees(), 5), 102.74426);
assert_eq!(util::round_upto_digits(rad_vec, 6), 2.364192);
}
|
identifier_body
|
|
near_parabolic.rs
|
/*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
extern crate astro;
use astro::*;
#[test]
fn
|
() {
let (tru_anom, rad_vec) = orbit::near_parabolic::true_anom_and_rad_vec (
138.4783, 0.0, 1.0, 0.921326, 0.0000001
).unwrap();
assert_eq!(util::round_upto_digits(tru_anom.to_degrees(), 5), 102.74426);
assert_eq!(util::round_upto_digits(rad_vec, 6), 2.364192);
}
|
true_anom_and_rad_vec
|
identifier_name
|
near_parabolic.rs
|
/*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
extern crate astro;
use astro::*;
#[test]
fn true_anom_and_rad_vec() {
let (tru_anom, rad_vec) = orbit::near_parabolic::true_anom_and_rad_vec (
138.4783, 0.0, 1.0, 0.921326, 0.0000001
).unwrap();
assert_eq!(util::round_upto_digits(tru_anom.to_degrees(), 5), 102.74426);
assert_eq!(util::round_upto_digits(rad_vec, 6), 2.364192);
|
}
|
random_line_split
|
|
inherited_stability.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name="inherited_stability"]
#![crate_type = "lib"]
#![unstable(feature = "unstable_test_feature", issue = "0")]
#![feature(staged_api)]
pub fn unstable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub mod stable_mod {
#[unstable(feature = "unstable_test_feature", issue = "0")]
pub fn
|
() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stable() {}
}
#[unstable(feature = "unstable_test_feature", issue = "0")]
pub mod unstable_mod {
#[stable(feature = "stable_test_feature", since = "1.0.0")]
#[rustc_deprecated(since = "1.0.0", reason = "text")]
pub fn deprecated() {}
pub fn unstable() {}
}
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Stable {
#[unstable(feature = "unstable_test_feature", issue = "0")]
fn unstable(&self);
#[stable(feature = "rust1", since = "1.0.0")]
fn stable(&self);
}
impl Stable for usize {
fn unstable(&self) {}
fn stable(&self) {}
}
pub enum Unstable {
UnstableVariant,
#[stable(feature = "rust1", since = "1.0.0")]
StableVariant
}
|
unstable
|
identifier_name
|
inherited_stability.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name="inherited_stability"]
#![crate_type = "lib"]
#![unstable(feature = "unstable_test_feature", issue = "0")]
#![feature(staged_api)]
pub fn unstable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub mod stable_mod {
#[unstable(feature = "unstable_test_feature", issue = "0")]
pub fn unstable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stable() {}
}
#[unstable(feature = "unstable_test_feature", issue = "0")]
pub mod unstable_mod {
#[stable(feature = "stable_test_feature", since = "1.0.0")]
#[rustc_deprecated(since = "1.0.0", reason = "text")]
pub fn deprecated() {}
pub fn unstable() {}
}
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Stable {
#[unstable(feature = "unstable_test_feature", issue = "0")]
fn unstable(&self);
#[stable(feature = "rust1", since = "1.0.0")]
fn stable(&self);
}
impl Stable for usize {
fn unstable(&self) {}
fn stable(&self)
|
}
pub enum Unstable {
UnstableVariant,
#[stable(feature = "rust1", since = "1.0.0")]
StableVariant
}
|
{}
|
identifier_body
|
inherited_stability.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
#![crate_type = "lib"]
#![unstable(feature = "unstable_test_feature", issue = "0")]
#![feature(staged_api)]
pub fn unstable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub mod stable_mod {
#[unstable(feature = "unstable_test_feature", issue = "0")]
pub fn unstable() {}
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stable() {}
}
#[unstable(feature = "unstable_test_feature", issue = "0")]
pub mod unstable_mod {
#[stable(feature = "stable_test_feature", since = "1.0.0")]
#[rustc_deprecated(since = "1.0.0", reason = "text")]
pub fn deprecated() {}
pub fn unstable() {}
}
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Stable {
#[unstable(feature = "unstable_test_feature", issue = "0")]
fn unstable(&self);
#[stable(feature = "rust1", since = "1.0.0")]
fn stable(&self);
}
impl Stable for usize {
fn unstable(&self) {}
fn stable(&self) {}
}
pub enum Unstable {
UnstableVariant,
#[stable(feature = "rust1", since = "1.0.0")]
StableVariant
}
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name="inherited_stability"]
|
random_line_split
|
klog.rs
|
use fs::{KScheme, Resource, Url};
use fs::resource::ResourceSeek;
use collections::string::String;
use alloc::boxed::Box;
use system::error::Result;
use logging::LogLevel;
/// The kernel log scheme.
pub struct KlogScheme;
impl KScheme for KlogScheme {
/// Returns the name of the scheme: "klog"
fn scheme(&self) -> &str {
"klog"
}
/// Returns a resource. The `url` and `flags` arguments are currently unused.
fn open(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
Ok(Box::new(KlogResource {
pos: 0,
}))
}
/// Clears the logs.
fn unlink(&mut self, _: Url) -> Result<()> {
let mut logs = ::env().logs.lock();
logs.clear();
Ok(())
}
}
/// The kernel log resource.
pub struct KlogResource {
pos: usize,
}
impl KlogResource {
fn get_log_str(&self) -> String {
let ref mut logs = *::env().logs.lock();
let mut string = String::new();
for &mut (ref level, ref message) in logs {
let prefix: &str = match *level {
LogLevel::Debug => "DEBUG ",
LogLevel::Info => "INFO ",
LogLevel::Warning => "WARN ",
|
LogLevel::Critical => "CRIT ",
};
string.push_str(prefix);
string.push_str(message);
string.push('\n');
}
string
}
}
impl Resource for KlogResource {
fn dup(&self) -> Result<Box<Resource>> {
Ok(Box::new(KlogResource {
pos: self.pos,
}))
}
/// Fills `buf` with the kernel log. Each message is prefixed by its log level:
/// - `CRIT`
/// - `ERROR`
/// - `WARN`
/// - `INFO`
/// - `DEBUG`
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let mut i = 0;
let logs = self.get_log_str();
while i < buf.len() && self.pos < logs.bytes().count() {
match logs.bytes().nth(self.pos) {
Some(c) => buf[i] = c,
None => ()
}
i += 1;
self.pos += 1;
}
Ok(i)
}
fn seek(&mut self, pos: ResourceSeek) -> Result<usize> {
match pos {
ResourceSeek::Start(offset) => self.pos = offset as usize,
ResourceSeek::Current(offset) => self.pos += offset as usize,
ResourceSeek::End(offset) => {
let logs = self.get_log_str();
self.pos = (logs.bytes().count() as isize + offset) as usize;
}
}
Ok(self.pos)
}
}
|
LogLevel::Error => "ERROR ",
|
random_line_split
|
klog.rs
|
use fs::{KScheme, Resource, Url};
use fs::resource::ResourceSeek;
use collections::string::String;
use alloc::boxed::Box;
use system::error::Result;
use logging::LogLevel;
/// The kernel log scheme.
pub struct
|
;
impl KScheme for KlogScheme {
/// Returns the name of the scheme: "klog"
fn scheme(&self) -> &str {
"klog"
}
/// Returns a resource. The `url` and `flags` arguments are currently unused.
fn open(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
Ok(Box::new(KlogResource {
pos: 0,
}))
}
/// Clears the logs.
fn unlink(&mut self, _: Url) -> Result<()> {
let mut logs = ::env().logs.lock();
logs.clear();
Ok(())
}
}
/// The kernel log resource.
pub struct KlogResource {
pos: usize,
}
impl KlogResource {
fn get_log_str(&self) -> String {
let ref mut logs = *::env().logs.lock();
let mut string = String::new();
for &mut (ref level, ref message) in logs {
let prefix: &str = match *level {
LogLevel::Debug => "DEBUG ",
LogLevel::Info => "INFO ",
LogLevel::Warning => "WARN ",
LogLevel::Error => "ERROR ",
LogLevel::Critical => "CRIT ",
};
string.push_str(prefix);
string.push_str(message);
string.push('\n');
}
string
}
}
impl Resource for KlogResource {
fn dup(&self) -> Result<Box<Resource>> {
Ok(Box::new(KlogResource {
pos: self.pos,
}))
}
/// Fills `buf` with the kernel log. Each message is prefixed by its log level:
/// - `CRIT`
/// - `ERROR`
/// - `WARN`
/// - `INFO`
/// - `DEBUG`
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let mut i = 0;
let logs = self.get_log_str();
while i < buf.len() && self.pos < logs.bytes().count() {
match logs.bytes().nth(self.pos) {
Some(c) => buf[i] = c,
None => ()
}
i += 1;
self.pos += 1;
}
Ok(i)
}
fn seek(&mut self, pos: ResourceSeek) -> Result<usize> {
match pos {
ResourceSeek::Start(offset) => self.pos = offset as usize,
ResourceSeek::Current(offset) => self.pos += offset as usize,
ResourceSeek::End(offset) => {
let logs = self.get_log_str();
self.pos = (logs.bytes().count() as isize + offset) as usize;
}
}
Ok(self.pos)
}
}
|
KlogScheme
|
identifier_name
|
klog.rs
|
use fs::{KScheme, Resource, Url};
use fs::resource::ResourceSeek;
use collections::string::String;
use alloc::boxed::Box;
use system::error::Result;
use logging::LogLevel;
/// The kernel log scheme.
pub struct KlogScheme;
impl KScheme for KlogScheme {
/// Returns the name of the scheme: "klog"
fn scheme(&self) -> &str
|
/// Returns a resource. The `url` and `flags` arguments are currently unused.
fn open(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
Ok(Box::new(KlogResource {
pos: 0,
}))
}
/// Clears the logs.
fn unlink(&mut self, _: Url) -> Result<()> {
let mut logs = ::env().logs.lock();
logs.clear();
Ok(())
}
}
/// The kernel log resource.
pub struct KlogResource {
pos: usize,
}
impl KlogResource {
fn get_log_str(&self) -> String {
let ref mut logs = *::env().logs.lock();
let mut string = String::new();
for &mut (ref level, ref message) in logs {
let prefix: &str = match *level {
LogLevel::Debug => "DEBUG ",
LogLevel::Info => "INFO ",
LogLevel::Warning => "WARN ",
LogLevel::Error => "ERROR ",
LogLevel::Critical => "CRIT ",
};
string.push_str(prefix);
string.push_str(message);
string.push('\n');
}
string
}
}
impl Resource for KlogResource {
fn dup(&self) -> Result<Box<Resource>> {
Ok(Box::new(KlogResource {
pos: self.pos,
}))
}
/// Fills `buf` with the kernel log. Each message is prefixed by its log level:
/// - `CRIT`
/// - `ERROR`
/// - `WARN`
/// - `INFO`
/// - `DEBUG`
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let mut i = 0;
let logs = self.get_log_str();
while i < buf.len() && self.pos < logs.bytes().count() {
match logs.bytes().nth(self.pos) {
Some(c) => buf[i] = c,
None => ()
}
i += 1;
self.pos += 1;
}
Ok(i)
}
fn seek(&mut self, pos: ResourceSeek) -> Result<usize> {
match pos {
ResourceSeek::Start(offset) => self.pos = offset as usize,
ResourceSeek::Current(offset) => self.pos += offset as usize,
ResourceSeek::End(offset) => {
let logs = self.get_log_str();
self.pos = (logs.bytes().count() as isize + offset) as usize;
}
}
Ok(self.pos)
}
}
|
{
"klog"
}
|
identifier_body
|
mocking_utils.rs
|
/// Converts non-mutable reference to a mutable one
///
/// Allows creating multiple mutable references to a single item breaking Rust's safety policy.
/// # Safety
/// Use with extreme caution, may cause all sorts of mutability related undefined behaviors!
///
/// One safe use case is when mocking function, which gets called only once during whole test execution, for example:
///
/// ```
/// #[mockable]
/// fn get_string(context: &mut Context) -> &mut String {
/// context.get_mut_string()
/// }
///
/// #[test]
/// fn get_string_test() {
/// let mocked = "mocked".to_string();
/// unsafe {
/// // MockResult::Return(&mut string) would fail
/// get_string.mock_raw(|_| MockResult::Return(as_mut(&mocked)));
/// }
///
/// assert_eq!("mocked", get_string(&mut Context::default()));
/// }
/// ```
pub unsafe fn
|
<T>(t_ref: &T) -> &mut T {
&mut *(t_ref as *const T as *mut T)
}
|
as_mut
|
identifier_name
|
mocking_utils.rs
|
/// Converts non-mutable reference to a mutable one
///
/// Allows creating multiple mutable references to a single item breaking Rust's safety policy.
/// # Safety
/// Use with extreme caution, may cause all sorts of mutability related undefined behaviors!
///
/// One safe use case is when mocking function, which gets called only once during whole test execution, for example:
///
/// ```
/// #[mockable]
/// fn get_string(context: &mut Context) -> &mut String {
/// context.get_mut_string()
/// }
///
/// #[test]
/// fn get_string_test() {
/// let mocked = "mocked".to_string();
/// unsafe {
/// // MockResult::Return(&mut string) would fail
/// get_string.mock_raw(|_| MockResult::Return(as_mut(&mocked)));
/// }
///
/// assert_eq!("mocked", get_string(&mut Context::default()));
/// }
/// ```
|
}
|
pub unsafe fn as_mut<T>(t_ref: &T) -> &mut T {
&mut *(t_ref as *const T as *mut T)
|
random_line_split
|
mocking_utils.rs
|
/// Converts non-mutable reference to a mutable one
///
/// Allows creating multiple mutable references to a single item breaking Rust's safety policy.
/// # Safety
/// Use with extreme caution, may cause all sorts of mutability related undefined behaviors!
///
/// One safe use case is when mocking function, which gets called only once during whole test execution, for example:
///
/// ```
/// #[mockable]
/// fn get_string(context: &mut Context) -> &mut String {
/// context.get_mut_string()
/// }
///
/// #[test]
/// fn get_string_test() {
/// let mocked = "mocked".to_string();
/// unsafe {
/// // MockResult::Return(&mut string) would fail
/// get_string.mock_raw(|_| MockResult::Return(as_mut(&mocked)));
/// }
///
/// assert_eq!("mocked", get_string(&mut Context::default()));
/// }
/// ```
pub unsafe fn as_mut<T>(t_ref: &T) -> &mut T
|
{
&mut *(t_ref as *const T as *mut T)
}
|
identifier_body
|
|
variable_renaming.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use cnf::ast::{Term, Formula};
use cnf::renaming_info::RenamingInfo;
/// Renames variables so that different occurences of quantifiers bind different variables.
pub fn rename(f: Formula, ri: &mut RenamingInfo) -> Formula {
match f {
Formula::Not(p) => Formula::Not(Box::new(rename(*p, ri))),
Formula::And(l) => Formula::And(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Or(l) => Formula::Or(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Forall(id, p) => rename_quantifier(id, *p, ri, true),
Formula::Exists(id, p) => rename_quantifier(id, *p, ri, false),
|
}
}
fn rename_quantifier(id: i64,
p: Formula,
ri: &mut RenamingInfo,
universal_quantifier: bool)
-> Formula {
let new_id = ri.create_new_variable_id();
let renamed_p = rename(rename_variable(p, id, new_id), ri);
if universal_quantifier {
Formula::Forall(new_id, Box::new(renamed_p))
} else {
Formula::Exists(new_id, Box::new(renamed_p))
}
}
/// Renames all occurrences of a single variable in a formula to another variable.
fn rename_variable(f: Formula, from: i64, to: i64) -> Formula {
match f {
Formula::Predicate(id, terms) => {
Formula::Predicate(id,
terms.into_iter()
.map(|t| rename_variable_in_term(t, from, to))
.collect())
}
Formula::Not(p) => Formula::Not(Box::new(rename_variable(*p, from, to))),
Formula::And(l) => {
Formula::And(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Or(l) => {
Formula::Or(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Forall(id, p) => {
Formula::Forall(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
Formula::Exists(id, p) => {
Formula::Exists(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
_ => f,
}
}
/// Renames all occurrences of a single variable in a term to another variable.
fn rename_variable_in_term(t: Term, from: i64, to: i64) -> Term {
match t {
Term::Variable(id) => {
if from == id {
Term::Variable(to)
} else {
Term::Variable(id)
}
}
Term::Function(id, args) => {
Term::Function(id,
args.into_iter()
.map(|t2| rename_variable_in_term(t2, from, to))
.collect())
}
}
}
#[cfg(test)]
mod test {}
|
_ => f,
|
random_line_split
|
variable_renaming.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use cnf::ast::{Term, Formula};
use cnf::renaming_info::RenamingInfo;
/// Renames variables so that different occurences of quantifiers bind different variables.
pub fn
|
(f: Formula, ri: &mut RenamingInfo) -> Formula {
match f {
Formula::Not(p) => Formula::Not(Box::new(rename(*p, ri))),
Formula::And(l) => Formula::And(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Or(l) => Formula::Or(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Forall(id, p) => rename_quantifier(id, *p, ri, true),
Formula::Exists(id, p) => rename_quantifier(id, *p, ri, false),
_ => f,
}
}
fn rename_quantifier(id: i64,
p: Formula,
ri: &mut RenamingInfo,
universal_quantifier: bool)
-> Formula {
let new_id = ri.create_new_variable_id();
let renamed_p = rename(rename_variable(p, id, new_id), ri);
if universal_quantifier {
Formula::Forall(new_id, Box::new(renamed_p))
} else {
Formula::Exists(new_id, Box::new(renamed_p))
}
}
/// Renames all occurrences of a single variable in a formula to another variable.
fn rename_variable(f: Formula, from: i64, to: i64) -> Formula {
match f {
Formula::Predicate(id, terms) => {
Formula::Predicate(id,
terms.into_iter()
.map(|t| rename_variable_in_term(t, from, to))
.collect())
}
Formula::Not(p) => Formula::Not(Box::new(rename_variable(*p, from, to))),
Formula::And(l) => {
Formula::And(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Or(l) => {
Formula::Or(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Forall(id, p) => {
Formula::Forall(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
Formula::Exists(id, p) => {
Formula::Exists(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
_ => f,
}
}
/// Renames all occurrences of a single variable in a term to another variable.
fn rename_variable_in_term(t: Term, from: i64, to: i64) -> Term {
match t {
Term::Variable(id) => {
if from == id {
Term::Variable(to)
} else {
Term::Variable(id)
}
}
Term::Function(id, args) => {
Term::Function(id,
args.into_iter()
.map(|t2| rename_variable_in_term(t2, from, to))
.collect())
}
}
}
#[cfg(test)]
mod test {}
|
rename
|
identifier_name
|
variable_renaming.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use cnf::ast::{Term, Formula};
use cnf::renaming_info::RenamingInfo;
/// Renames variables so that different occurences of quantifiers bind different variables.
pub fn rename(f: Formula, ri: &mut RenamingInfo) -> Formula {
match f {
Formula::Not(p) => Formula::Not(Box::new(rename(*p, ri))),
Formula::And(l) => Formula::And(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Or(l) => Formula::Or(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Forall(id, p) => rename_quantifier(id, *p, ri, true),
Formula::Exists(id, p) => rename_quantifier(id, *p, ri, false),
_ => f,
}
}
fn rename_quantifier(id: i64,
p: Formula,
ri: &mut RenamingInfo,
universal_quantifier: bool)
-> Formula {
let new_id = ri.create_new_variable_id();
let renamed_p = rename(rename_variable(p, id, new_id), ri);
if universal_quantifier
|
else {
Formula::Exists(new_id, Box::new(renamed_p))
}
}
/// Renames all occurrences of a single variable in a formula to another variable.
fn rename_variable(f: Formula, from: i64, to: i64) -> Formula {
match f {
Formula::Predicate(id, terms) => {
Formula::Predicate(id,
terms.into_iter()
.map(|t| rename_variable_in_term(t, from, to))
.collect())
}
Formula::Not(p) => Formula::Not(Box::new(rename_variable(*p, from, to))),
Formula::And(l) => {
Formula::And(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Or(l) => {
Formula::Or(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Forall(id, p) => {
Formula::Forall(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
Formula::Exists(id, p) => {
Formula::Exists(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
_ => f,
}
}
/// Renames all occurrences of a single variable in a term to another variable.
fn rename_variable_in_term(t: Term, from: i64, to: i64) -> Term {
match t {
Term::Variable(id) => {
if from == id {
Term::Variable(to)
} else {
Term::Variable(id)
}
}
Term::Function(id, args) => {
Term::Function(id,
args.into_iter()
.map(|t2| rename_variable_in_term(t2, from, to))
.collect())
}
}
}
#[cfg(test)]
mod test {}
|
{
Formula::Forall(new_id, Box::new(renamed_p))
}
|
conditional_block
|
variable_renaming.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use cnf::ast::{Term, Formula};
use cnf::renaming_info::RenamingInfo;
/// Renames variables so that different occurences of quantifiers bind different variables.
pub fn rename(f: Formula, ri: &mut RenamingInfo) -> Formula {
match f {
Formula::Not(p) => Formula::Not(Box::new(rename(*p, ri))),
Formula::And(l) => Formula::And(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Or(l) => Formula::Or(l.into_iter().map(|x| rename(x, ri)).collect()),
Formula::Forall(id, p) => rename_quantifier(id, *p, ri, true),
Formula::Exists(id, p) => rename_quantifier(id, *p, ri, false),
_ => f,
}
}
fn rename_quantifier(id: i64,
p: Formula,
ri: &mut RenamingInfo,
universal_quantifier: bool)
-> Formula {
let new_id = ri.create_new_variable_id();
let renamed_p = rename(rename_variable(p, id, new_id), ri);
if universal_quantifier {
Formula::Forall(new_id, Box::new(renamed_p))
} else {
Formula::Exists(new_id, Box::new(renamed_p))
}
}
/// Renames all occurrences of a single variable in a formula to another variable.
fn rename_variable(f: Formula, from: i64, to: i64) -> Formula {
match f {
Formula::Predicate(id, terms) => {
Formula::Predicate(id,
terms.into_iter()
.map(|t| rename_variable_in_term(t, from, to))
.collect())
}
Formula::Not(p) => Formula::Not(Box::new(rename_variable(*p, from, to))),
Formula::And(l) => {
Formula::And(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Or(l) => {
Formula::Or(l.into_iter().map(|x| rename_variable(x, from, to)).collect())
}
Formula::Forall(id, p) => {
Formula::Forall(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
Formula::Exists(id, p) => {
Formula::Exists(if id == from {
to
} else {
id
},
Box::new(rename_variable(*p, from, to)))
}
_ => f,
}
}
/// Renames all occurrences of a single variable in a term to another variable.
fn rename_variable_in_term(t: Term, from: i64, to: i64) -> Term
|
#[cfg(test)]
mod test {}
|
{
match t {
Term::Variable(id) => {
if from == id {
Term::Variable(to)
} else {
Term::Variable(id)
}
}
Term::Function(id, args) => {
Term::Function(id,
args.into_iter()
.map(|t2| rename_variable_in_term(t2, from, to))
.collect())
}
}
}
|
identifier_body
|
service.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
blockchain::{config::InstanceInitParams, ApiSender, SendError},
crypto::{Hash, KeyPair, PublicKey},
helpers::{Height, ValidatorId},
merkledb::{access::Prefixed, BinaryValue, ObjectHash, Snapshot},
runtime::{
ArtifactId, BlockchainData, DispatcherAction, ExecutionContext, ExecutionError,
InstanceDescriptor, InstanceId, InstanceStatus, Mailbox, MethodId, SnapshotExt,
},
};
use futures::{
executor::block_on,
future::{BoxFuture, FutureExt},
};
use std::fmt::{self, Debug};
use super::{api::ServiceApiBuilder, ArtifactProtobufSpec, GenericCall, MethodDescriptor};
/// Describes how the service instance should dispatch specific method calls
/// with consideration of the interface where the method belongs.
///
/// Usually, `ServiceDispatcher` can be derived using the
/// [`ServiceDispatcher`](index.html#examples) macro.
pub trait ServiceDispatcher: Send {
/// Dispatches the interface method call within the specified context.
fn call(
&self,
context: ExecutionContext<'_>,
method: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError>;
}
/// Describes an Exonum service instance.
///
/// `Service` determines how a service instance responds to certain requests and events
/// from the runtime.
///
/// # Implementation Requirements
///
/// Any changes of the storage state in the methods that can perform such changes (i.e., methods
/// receiving `ExecutionContext`) must be the same for all nodes in the blockchain network.
/// In other words, the service should only use data available in the provided context to perform
/// such changes.
pub trait Service: ServiceDispatcher + Debug +'static {
/// Initializes a new service instance with the given parameters. This method is called once
/// after creating a new service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
fn initialize(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Resumes a previously stopped service instance with given parameters. This method
/// is called once after restarting a service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
///
/// [Migration workflow] guarantees that the data layout is supported by the resumed
/// service version.
///
/// [Migration workflow]: https://exonum.com/doc/version/latest/architecture/services/#data-migrations
fn resume(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service before processing any transaction
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Services should not rely on a particular ordering of `Service::before_transactions`
/// invocations among services.
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service after processing all transactions
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Note that if service was added in the genesis block, it will be activated immediately and
/// thus `after_transactions` will be invoked for such a service after the genesis block creation.
/// If you aren't interested in the processing of for the genesis block, you can use
/// [`ExecutionContext::in_genesis_block`] method and exit early if `true` is returned.
///
/// Invocation of the `height()` method of the core blockchain schema will **panic**
/// if invoked within `after_transactions` of the genesis block. If you are going
/// to process the genesis block and need to know current height, use the `next_height()` method
/// to infer the current blockchain height.
///
/// Services should not rely on a particular ordering of `Service::after_transactions`
/// invocations among services.
///
/// [`ExecutionContext::in_genesis_block`]: struct.ExecutionContext.html#method.in_genesis_block
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError>
|
/// Handles block commit event.
///
/// This handler is a callback which is invoked by the blockchain
/// after each block commit. For example, a service can broadcast one or more transactions
/// if a specific condition has occurred.
///
/// The default implementation does nothing.
///
/// Try not to perform long operations in this handler since it is executed
/// on the consensus thread.
fn after_commit(&self, _context: AfterCommitContext<'_>) {}
/// Attaches the request handlers of the service API to the Exonum API schema.
///
/// The default implementation does nothing (i.e., does not provide any API for the service).
///
/// The request handlers are mounted on the `/api/services/{instance_name}` path at the
/// listen address of every full node in the blockchain network.
fn wire_api(&self, _builder: &mut ServiceApiBuilder) {}
}
/// Describes a service instance factory for the specific Rust artifact.
///
/// Usually, `ServiceFactory` can be derived using the
/// [`ServiceFactory`](index.html#examples) macro.
pub trait ServiceFactory: Send + Debug +'static {
/// Returns the unique artifact identifier corresponding to the factory.
fn artifact_id(&self) -> ArtifactId;
/// Returns the Protobuf specification used by the instances of this service.
fn artifact_protobuf_spec(&self) -> ArtifactProtobufSpec;
/// Creates a new service instance.
fn create_instance(&self) -> Box<dyn Service>;
}
#[allow(clippy::use_self)] // false positive
impl<T> From<T> for Box<dyn ServiceFactory>
where
T: ServiceFactory,
{
fn from(factory: T) -> Self {
Box::new(factory) as Self
}
}
/// Provides default instance configuration parameters for `ServiceFactory`.
pub trait DefaultInstance: ServiceFactory {
/// Default id for a service.
const INSTANCE_ID: InstanceId;
/// Default name for a service.
const INSTANCE_NAME: &'static str;
/// Creates default instance configuration parameters for the service.
fn default_instance(&self) -> InstanceInitParams {
self.artifact_id()
.into_default_instance(Self::INSTANCE_ID, Self::INSTANCE_NAME)
}
}
/// Provide context for the `after_commit` handler.
pub struct AfterCommitContext<'a> {
/// Reference to the dispatcher mailbox.
mailbox: &'a mut Mailbox,
/// Read-only snapshot of the current blockchain state.
snapshot: &'a dyn Snapshot,
/// Transaction broadcaster.
broadcaster: Broadcaster,
/// ID of the node as a validator.
validator_id: Option<ValidatorId>,
/// Current status of the service.
status: InstanceStatus,
}
impl<'a> AfterCommitContext<'a> {
/// Creates a new `AfterCommit` context.
pub(crate) fn new(
mailbox: &'a mut Mailbox,
instance: InstanceDescriptor,
snapshot: &'a dyn Snapshot,
service_keypair: &'a KeyPair,
tx_sender: &'a ApiSender,
validator_id: Option<ValidatorId>,
) -> Self {
let status = snapshot
.for_dispatcher()
.get_instance(instance.id)
.unwrap_or_else(|| {
panic!("BUG: Cannot find instance state for service `{}`", instance);
})
.status
.expect("BUG: status for a service receiving `after_commit` hook cannot be `None`");
Self {
mailbox,
snapshot,
validator_id,
broadcaster: Broadcaster::new(instance, service_keypair.clone(), tx_sender.clone()),
status,
}
}
/// Returns blockchain data for the snapshot associated with this context.
pub fn data(&self) -> BlockchainData<&'a dyn Snapshot> {
BlockchainData::new(self.snapshot, &self.broadcaster.instance().name)
}
/// Returns snapshot of the data for the executing service.
pub fn service_data(&self) -> Prefixed<&'a dyn Snapshot> {
self.data().for_executing_service()
}
/// Returns a current blockchain height. This height is "height of the latest committed block".
pub fn height(&self) -> Height {
// TODO Perhaps we should optimize this method [ECR-3222]
self.data().for_core().height()
}
/// Returns the service key of this node.
pub fn service_key(&self) -> PublicKey {
self.broadcaster.service_keypair.public_key()
}
/// Returns the ID of this node as a validator. If the node is not a validator, returns `None`.
pub fn validator_id(&self) -> Option<ValidatorId> {
self.validator_id
}
/// Returns the current status of the service.
pub fn status(&self) -> &InstanceStatus {
&self.status
}
/// Returns a transaction broadcaster if the current node is a validator and the service
/// is active (i.e., can process transactions). If these conditions do not hold, returns `None`.
pub fn broadcaster(&self) -> Option<Broadcaster> {
self.validator_id?;
if self.status.is_active() {
Some(self.broadcaster.clone())
} else {
None
}
}
/// Returns a transaction broadcaster regardless of the node status (validator or auditor)
/// and the service status (active or not).
///
/// # Safety
///
/// Transactions for non-active services will not be broadcast successfully; they will be
/// filtered on the receiving nodes as ones that cannot (currently) be processed.
pub fn generic_broadcaster(&self) -> Broadcaster {
self.broadcaster.clone()
}
/// Provides a privileged interface to the supervisor service.
///
/// `None` will be returned if the caller is not a supervisor.
#[doc(hidden)]
pub fn supervisor_extensions(&mut self) -> Option<SupervisorExtensions<'_>> {
if!is_supervisor(self.broadcaster.instance().id) {
return None;
}
Some(SupervisorExtensions {
mailbox: &mut *self.mailbox,
})
}
}
/// Transaction broadcaster.
///
/// Transaction broadcast allows a service to create transactions in the `after_commit`
/// handler or the HTTP API handlers and broadcast them to the connected Exonum nodes.
/// The transactions are addressed to the executing service instance and are signed
/// by the service keypair of the node.
///
/// Broadcasting functionality is primarily useful for services that receive information
/// from outside the blockchain and need to translate it to transactions. As an example,
/// a time oracle service may broadcast local node time and build the blockchain-wide time
/// by processing corresponding transactions.
///
/// # Examples
///
/// Using `Broadcaster` in service logic:
///
/// ```
/// # use exonum_derive::*;
/// use exonum::runtime::{ExecutionContext, ExecutionError};
/// use exonum_rust_runtime::{AfterCommitContext, Service};
///
/// #[exonum_interface]
/// trait MyInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn publish_string(&self, ctx: Ctx, value: String) -> Self::Output;
/// }
///
/// #[derive(Debug, ServiceDispatcher, ServiceFactory)]
/// #[service_dispatcher(implements("MyInterface"))]
/// struct MyService;
///
/// impl MyInterface<ExecutionContext<'_>> for MyService {
/// // implementation skipped...
/// # type Output = Result<(), ExecutionError>;
/// # fn publish_string(&self, ctx: ExecutionContext<'_>, value: String) -> Self::Output {
/// # Ok(())
/// # }
/// }
///
/// impl Service for MyService {
/// fn after_commit(&self, ctx: AfterCommitContext<'_>) {
/// if let Some(broadcaster) = ctx.broadcaster() {
/// // Broadcast a `do_something` transaction with
/// // the specified payload. We swallow an error in this case
/// // (in a more thorough setup, it could be logged).
/// broadcaster.blocking().publish_string((), "!".to_owned()).ok();
/// }
/// }
/// }
/// ```
#[derive(Debug, Clone)]
pub struct Broadcaster {
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
}
impl Broadcaster {
/// Creates a new broadcaster.
pub(super) fn new(
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
) -> Self {
Self {
instance,
service_keypair,
tx_sender,
}
}
/// Returns a synchronous broadcaster that blocks the current thread to broadcast transaction.
pub fn blocking(self) -> BlockingBroadcaster {
BlockingBroadcaster(self)
}
pub(super) fn keypair(&self) -> &KeyPair {
&self.service_keypair
}
pub(super) fn instance(&self) -> &InstanceDescriptor {
&self.instance
}
}
/// Signs and asynchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for Broadcaster {
type Output = BoxFuture<'static, Result<Hash, SendError>>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
let msg = self
.service_keypair
.clone()
.generic_call(self.instance().id, method, args);
let tx_hash = msg.object_hash();
let tx_sender = self.tx_sender.clone();
async move {
tx_sender.broadcast_transaction(msg).await?;
Ok(tx_hash)
}
.boxed()
}
}
/// A wrapper around the [`Broadcaster`] to broadcast transactions synchronously.
///
/// [`Broadcaster`]: struct.Broadcaster.html
#[derive(Debug, Clone)]
pub struct BlockingBroadcaster(Broadcaster);
/// Signs and synchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for BlockingBroadcaster {
type Output = Result<Hash, SendError>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
block_on(self.0.generic_call((), method, args))
}
}
/// Extended blockchain interface for the service instance authorized as a supervisor.
#[derive(Debug)]
pub struct SupervisorExtensions<'a> {
mailbox: &'a mut Mailbox,
}
impl SupervisorExtensions<'_> {
/// Starts the deployment of an artifact. The provided callback is executed after
/// the deployment is completed.
pub fn start_deploy(
&mut self,
artifact: ArtifactId,
spec: impl BinaryValue,
then: impl FnOnce(Result<(), ExecutionError>) -> Result<(), ExecutionError> + Send +'static,
) {
let action = DispatcherAction::StartDeploy {
artifact,
spec: spec.into_bytes(),
then: Box::new(then),
};
self.mailbox.push(action);
}
}
impl Debug for AfterCommitContext<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AfterCommitContext")
.field("instance", &self.broadcaster.instance)
.finish()
}
}
fn is_supervisor(instance_id: InstanceId) -> bool {
instance_id == exonum::runtime::SUPERVISOR_INSTANCE_ID
}
|
{
Ok(())
}
|
identifier_body
|
service.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
// limitations under the License.
use exonum::{
blockchain::{config::InstanceInitParams, ApiSender, SendError},
crypto::{Hash, KeyPair, PublicKey},
helpers::{Height, ValidatorId},
merkledb::{access::Prefixed, BinaryValue, ObjectHash, Snapshot},
runtime::{
ArtifactId, BlockchainData, DispatcherAction, ExecutionContext, ExecutionError,
InstanceDescriptor, InstanceId, InstanceStatus, Mailbox, MethodId, SnapshotExt,
},
};
use futures::{
executor::block_on,
future::{BoxFuture, FutureExt},
};
use std::fmt::{self, Debug};
use super::{api::ServiceApiBuilder, ArtifactProtobufSpec, GenericCall, MethodDescriptor};
/// Describes how the service instance should dispatch specific method calls
/// with consideration of the interface where the method belongs.
///
/// Usually, `ServiceDispatcher` can be derived using the
/// [`ServiceDispatcher`](index.html#examples) macro.
pub trait ServiceDispatcher: Send {
/// Dispatches the interface method call within the specified context.
fn call(
&self,
context: ExecutionContext<'_>,
method: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError>;
}
/// Describes an Exonum service instance.
///
/// `Service` determines how a service instance responds to certain requests and events
/// from the runtime.
///
/// # Implementation Requirements
///
/// Any changes of the storage state in the methods that can perform such changes (i.e., methods
/// receiving `ExecutionContext`) must be the same for all nodes in the blockchain network.
/// In other words, the service should only use data available in the provided context to perform
/// such changes.
pub trait Service: ServiceDispatcher + Debug +'static {
/// Initializes a new service instance with the given parameters. This method is called once
/// after creating a new service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
fn initialize(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Resumes a previously stopped service instance with given parameters. This method
/// is called once after restarting a service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
///
/// [Migration workflow] guarantees that the data layout is supported by the resumed
/// service version.
///
/// [Migration workflow]: https://exonum.com/doc/version/latest/architecture/services/#data-migrations
fn resume(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service before processing any transaction
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Services should not rely on a particular ordering of `Service::before_transactions`
/// invocations among services.
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service after processing all transactions
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Note that if service was added in the genesis block, it will be activated immediately and
/// thus `after_transactions` will be invoked for such a service after the genesis block creation.
/// If you aren't interested in the processing of for the genesis block, you can use
/// [`ExecutionContext::in_genesis_block`] method and exit early if `true` is returned.
///
/// Invocation of the `height()` method of the core blockchain schema will **panic**
/// if invoked within `after_transactions` of the genesis block. If you are going
/// to process the genesis block and need to know current height, use the `next_height()` method
/// to infer the current blockchain height.
///
/// Services should not rely on a particular ordering of `Service::after_transactions`
/// invocations among services.
///
/// [`ExecutionContext::in_genesis_block`]: struct.ExecutionContext.html#method.in_genesis_block
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
/// Handles block commit event.
///
/// This handler is a callback which is invoked by the blockchain
/// after each block commit. For example, a service can broadcast one or more transactions
/// if a specific condition has occurred.
///
/// The default implementation does nothing.
///
/// Try not to perform long operations in this handler since it is executed
/// on the consensus thread.
fn after_commit(&self, _context: AfterCommitContext<'_>) {}
/// Attaches the request handlers of the service API to the Exonum API schema.
///
/// The default implementation does nothing (i.e., does not provide any API for the service).
///
/// The request handlers are mounted on the `/api/services/{instance_name}` path at the
/// listen address of every full node in the blockchain network.
fn wire_api(&self, _builder: &mut ServiceApiBuilder) {}
}
/// Describes a service instance factory for the specific Rust artifact.
///
/// Usually, `ServiceFactory` can be derived using the
/// [`ServiceFactory`](index.html#examples) macro.
pub trait ServiceFactory: Send + Debug +'static {
/// Returns the unique artifact identifier corresponding to the factory.
fn artifact_id(&self) -> ArtifactId;
/// Returns the Protobuf specification used by the instances of this service.
fn artifact_protobuf_spec(&self) -> ArtifactProtobufSpec;
/// Creates a new service instance.
fn create_instance(&self) -> Box<dyn Service>;
}
#[allow(clippy::use_self)] // false positive
impl<T> From<T> for Box<dyn ServiceFactory>
where
T: ServiceFactory,
{
fn from(factory: T) -> Self {
Box::new(factory) as Self
}
}
/// Provides default instance configuration parameters for `ServiceFactory`.
pub trait DefaultInstance: ServiceFactory {
/// Default id for a service.
const INSTANCE_ID: InstanceId;
/// Default name for a service.
const INSTANCE_NAME: &'static str;
/// Creates default instance configuration parameters for the service.
fn default_instance(&self) -> InstanceInitParams {
self.artifact_id()
.into_default_instance(Self::INSTANCE_ID, Self::INSTANCE_NAME)
}
}
/// Provide context for the `after_commit` handler.
pub struct AfterCommitContext<'a> {
/// Reference to the dispatcher mailbox.
mailbox: &'a mut Mailbox,
/// Read-only snapshot of the current blockchain state.
snapshot: &'a dyn Snapshot,
/// Transaction broadcaster.
broadcaster: Broadcaster,
/// ID of the node as a validator.
validator_id: Option<ValidatorId>,
/// Current status of the service.
status: InstanceStatus,
}
impl<'a> AfterCommitContext<'a> {
/// Creates a new `AfterCommit` context.
pub(crate) fn new(
mailbox: &'a mut Mailbox,
instance: InstanceDescriptor,
snapshot: &'a dyn Snapshot,
service_keypair: &'a KeyPair,
tx_sender: &'a ApiSender,
validator_id: Option<ValidatorId>,
) -> Self {
let status = snapshot
.for_dispatcher()
.get_instance(instance.id)
.unwrap_or_else(|| {
panic!("BUG: Cannot find instance state for service `{}`", instance);
})
.status
.expect("BUG: status for a service receiving `after_commit` hook cannot be `None`");
Self {
mailbox,
snapshot,
validator_id,
broadcaster: Broadcaster::new(instance, service_keypair.clone(), tx_sender.clone()),
status,
}
}
/// Returns blockchain data for the snapshot associated with this context.
pub fn data(&self) -> BlockchainData<&'a dyn Snapshot> {
BlockchainData::new(self.snapshot, &self.broadcaster.instance().name)
}
/// Returns snapshot of the data for the executing service.
pub fn service_data(&self) -> Prefixed<&'a dyn Snapshot> {
self.data().for_executing_service()
}
/// Returns a current blockchain height. This height is "height of the latest committed block".
pub fn height(&self) -> Height {
// TODO Perhaps we should optimize this method [ECR-3222]
self.data().for_core().height()
}
/// Returns the service key of this node.
pub fn service_key(&self) -> PublicKey {
self.broadcaster.service_keypair.public_key()
}
/// Returns the ID of this node as a validator. If the node is not a validator, returns `None`.
pub fn validator_id(&self) -> Option<ValidatorId> {
self.validator_id
}
/// Returns the current status of the service.
pub fn status(&self) -> &InstanceStatus {
&self.status
}
/// Returns a transaction broadcaster if the current node is a validator and the service
/// is active (i.e., can process transactions). If these conditions do not hold, returns `None`.
pub fn broadcaster(&self) -> Option<Broadcaster> {
self.validator_id?;
if self.status.is_active() {
Some(self.broadcaster.clone())
} else {
None
}
}
/// Returns a transaction broadcaster regardless of the node status (validator or auditor)
/// and the service status (active or not).
///
/// # Safety
///
/// Transactions for non-active services will not be broadcast successfully; they will be
/// filtered on the receiving nodes as ones that cannot (currently) be processed.
pub fn generic_broadcaster(&self) -> Broadcaster {
self.broadcaster.clone()
}
/// Provides a privileged interface to the supervisor service.
///
/// `None` will be returned if the caller is not a supervisor.
#[doc(hidden)]
pub fn supervisor_extensions(&mut self) -> Option<SupervisorExtensions<'_>> {
if!is_supervisor(self.broadcaster.instance().id) {
return None;
}
Some(SupervisorExtensions {
mailbox: &mut *self.mailbox,
})
}
}
/// Transaction broadcaster.
///
/// Transaction broadcast allows a service to create transactions in the `after_commit`
/// handler or the HTTP API handlers and broadcast them to the connected Exonum nodes.
/// The transactions are addressed to the executing service instance and are signed
/// by the service keypair of the node.
///
/// Broadcasting functionality is primarily useful for services that receive information
/// from outside the blockchain and need to translate it to transactions. As an example,
/// a time oracle service may broadcast local node time and build the blockchain-wide time
/// by processing corresponding transactions.
///
/// # Examples
///
/// Using `Broadcaster` in service logic:
///
/// ```
/// # use exonum_derive::*;
/// use exonum::runtime::{ExecutionContext, ExecutionError};
/// use exonum_rust_runtime::{AfterCommitContext, Service};
///
/// #[exonum_interface]
/// trait MyInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn publish_string(&self, ctx: Ctx, value: String) -> Self::Output;
/// }
///
/// #[derive(Debug, ServiceDispatcher, ServiceFactory)]
/// #[service_dispatcher(implements("MyInterface"))]
/// struct MyService;
///
/// impl MyInterface<ExecutionContext<'_>> for MyService {
/// // implementation skipped...
/// # type Output = Result<(), ExecutionError>;
/// # fn publish_string(&self, ctx: ExecutionContext<'_>, value: String) -> Self::Output {
/// # Ok(())
/// # }
/// }
///
/// impl Service for MyService {
/// fn after_commit(&self, ctx: AfterCommitContext<'_>) {
/// if let Some(broadcaster) = ctx.broadcaster() {
/// // Broadcast a `do_something` transaction with
/// // the specified payload. We swallow an error in this case
/// // (in a more thorough setup, it could be logged).
/// broadcaster.blocking().publish_string((), "!".to_owned()).ok();
/// }
/// }
/// }
/// ```
#[derive(Debug, Clone)]
pub struct Broadcaster {
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
}
impl Broadcaster {
/// Creates a new broadcaster.
pub(super) fn new(
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
) -> Self {
Self {
instance,
service_keypair,
tx_sender,
}
}
/// Returns a synchronous broadcaster that blocks the current thread to broadcast transaction.
pub fn blocking(self) -> BlockingBroadcaster {
BlockingBroadcaster(self)
}
/// Returns the service keypair used to sign broadcast transactions.
pub(super) fn keypair(&self) -> &KeyPair {
&self.service_keypair
}
/// Returns the descriptor of the service instance that broadcast transactions are addressed to.
pub(super) fn instance(&self) -> &InstanceDescriptor {
&self.instance
}
}
/// Signs and asynchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for Broadcaster {
type Output = BoxFuture<'static, Result<Hash, SendError>>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
// Sign the message with the node's service keypair, addressing it to this instance.
let msg = self
.service_keypair
.clone()
.generic_call(self.instance().id, method, args);
// The hash is computed up front, before `msg` is moved into the future.
let tx_hash = msg.object_hash();
// Clone the sender so the returned future is `'static` and does not borrow `self`.
let tx_sender = self.tx_sender.clone();
async move {
tx_sender.broadcast_transaction(msg).await?;
Ok(tx_hash)
}
.boxed()
}
}
/// A wrapper around the [`Broadcaster`] to broadcast transactions synchronously.
///
/// Obtained via the [`Broadcaster::blocking`] method.
///
/// [`Broadcaster`]: struct.Broadcaster.html
/// [`Broadcaster::blocking`]: struct.Broadcaster.html#method.blocking
#[derive(Debug, Clone)]
pub struct BlockingBroadcaster(Broadcaster);
/// Signs and synchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for BlockingBroadcaster {
type Output = Result<Hash, SendError>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
// Drive the asynchronous broadcast to completion on the current thread.
block_on(self.0.generic_call((), method, args))
}
}
/// Extended blockchain interface for the service instance authorized as a supervisor.
#[derive(Debug)]
pub struct SupervisorExtensions<'a> {
    mailbox: &'a mut Mailbox,
}

impl SupervisorExtensions<'_> {
    /// Starts the deployment of an artifact. The provided callback is executed after
    /// the deployment is completed.
    pub fn start_deploy(
        &mut self,
        artifact: ArtifactId,
        spec: impl BinaryValue,
        then: impl FnOnce(Result<(), ExecutionError>) -> Result<(), ExecutionError> + Send + 'static,
    ) {
        // Hand the deployment request off to the dispatcher via its mailbox;
        // the callback is boxed so the action is a uniform, sendable value.
        self.mailbox.push(DispatcherAction::StartDeploy {
            artifact,
            spec: spec.into_bytes(),
            then: Box::new(then),
        });
    }
}
impl Debug for AfterCommitContext<'_> {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the instance descriptor is shown; the snapshot and mailbox are not debuggable.
        formatter
            .debug_struct("AfterCommitContext")
            .field("instance", &self.broadcaster.instance)
            .finish()
    }
}

/// Checks whether the given instance ID belongs to the supervisor service.
fn is_supervisor(instance_id: InstanceId) -> bool {
    exonum::runtime::SUPERVISOR_INSTANCE_ID == instance_id
}
|
// See the License for the specific language governing permissions and
|
random_line_split
|
service.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
blockchain::{config::InstanceInitParams, ApiSender, SendError},
crypto::{Hash, KeyPair, PublicKey},
helpers::{Height, ValidatorId},
merkledb::{access::Prefixed, BinaryValue, ObjectHash, Snapshot},
runtime::{
ArtifactId, BlockchainData, DispatcherAction, ExecutionContext, ExecutionError,
InstanceDescriptor, InstanceId, InstanceStatus, Mailbox, MethodId, SnapshotExt,
},
};
use futures::{
executor::block_on,
future::{BoxFuture, FutureExt},
};
use std::fmt::{self, Debug};
use super::{api::ServiceApiBuilder, ArtifactProtobufSpec, GenericCall, MethodDescriptor};
/// Describes how the service instance should dispatch specific method calls
/// with consideration of the interface where the method belongs.
///
/// Usually, `ServiceDispatcher` can be derived using the
/// [`ServiceDispatcher`](index.html#examples) macro.
pub trait ServiceDispatcher: Send {
/// Dispatches the interface method call within the specified context.
///
/// `method` identifies the called method within the service interface, and
/// `payload` carries the serialized method arguments.
fn call(
&self,
context: ExecutionContext<'_>,
method: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError>;
}
/// Describes an Exonum service instance.
///
/// `Service` determines how a service instance responds to certain requests and events
/// from the runtime.
///
/// # Implementation Requirements
///
/// Any changes of the storage state in the methods that can perform such changes (i.e., methods
/// receiving `ExecutionContext`) must be the same for all nodes in the blockchain network.
/// In other words, the service should only use data available in the provided context to perform
/// such changes.
pub trait Service: ServiceDispatcher + Debug +'static {
/// Initializes a new service instance with the given parameters. This method is called once
/// after creating a new service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
fn initialize(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Resumes a previously stopped service instance with given parameters. This method
/// is called once after restarting a service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
///
/// [Migration workflow] guarantees that the data layout is supported by the resumed
/// service version.
///
/// [Migration workflow]: https://exonum.com/doc/version/latest/architecture/services/#data-migrations
fn resume(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service before processing any transaction
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Services should not rely on a particular ordering of `Service::before_transactions`
/// invocations among services.
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service after processing all transactions
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Note that if service was added in the genesis block, it will be activated immediately and
/// thus `after_transactions` will be invoked for such a service after the genesis block creation.
/// If you aren't interested in the processing of the genesis block, you can use
/// [`ExecutionContext::in_genesis_block`] method and exit early if `true` is returned.
///
/// Invocation of the `height()` method of the core blockchain schema will **panic**
/// if invoked within `after_transactions` of the genesis block. If you are going
/// to process the genesis block and need to know current height, use the `next_height()` method
/// to infer the current blockchain height.
///
/// Services should not rely on a particular ordering of `Service::after_transactions`
/// invocations among services.
///
/// [`ExecutionContext::in_genesis_block`]: struct.ExecutionContext.html#method.in_genesis_block
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
/// Handles block commit event.
///
/// This handler is a callback which is invoked by the blockchain
/// after each block commit. For example, a service can broadcast one or more transactions
/// if a specific condition has occurred.
///
/// The default implementation does nothing.
///
/// Try not to perform long operations in this handler since it is executed
/// on the consensus thread.
fn after_commit(&self, _context: AfterCommitContext<'_>) {}
/// Attaches the request handlers of the service API to the Exonum API schema.
///
/// The default implementation does nothing (i.e., does not provide any API for the service).
///
/// The request handlers are mounted on the `/api/services/{instance_name}` path at the
/// listen address of every full node in the blockchain network.
fn wire_api(&self, _builder: &mut ServiceApiBuilder) {}
}
/// Describes a service instance factory for the specific Rust artifact.
///
/// Usually, `ServiceFactory` can be derived using the
/// [`ServiceFactory`](index.html#examples) macro.
pub trait ServiceFactory: Send + Debug +'static {
/// Returns the unique artifact identifier corresponding to the factory.
fn artifact_id(&self) -> ArtifactId;
/// Returns the Protobuf specification used by the instances of this service.
fn artifact_protobuf_spec(&self) -> ArtifactProtobufSpec;
/// Creates a new service instance.
fn create_instance(&self) -> Box<dyn Service>;
}
#[allow(clippy::use_self)] // false positive
impl<T> From<T> for Box<dyn ServiceFactory>
where
    T: ServiceFactory,
{
    fn from(factory: T) -> Self {
        // The unsized coercion from `Box<T>` to `Box<dyn ServiceFactory>` happens
        // implicitly at the return position.
        Box::new(factory)
    }
}
/// Provides default instance configuration parameters for `ServiceFactory`.
pub trait DefaultInstance: ServiceFactory {
/// Default id for a service.
const INSTANCE_ID: InstanceId;
/// Default name for a service.
const INSTANCE_NAME: &'static str;
/// Creates default instance configuration parameters for the service.
///
/// The default implementation combines the factory's artifact ID with
/// `INSTANCE_ID` / `INSTANCE_NAME`.
fn default_instance(&self) -> InstanceInitParams {
self.artifact_id()
.into_default_instance(Self::INSTANCE_ID, Self::INSTANCE_NAME)
}
}
/// Provides context for the `after_commit` handler.
pub struct AfterCommitContext<'a> {
/// Reference to the dispatcher mailbox, used for privileged supervisor actions.
mailbox: &'a mut Mailbox,
/// Read-only snapshot of the current blockchain state.
snapshot: &'a dyn Snapshot,
/// Transaction broadcaster addressed to the executing service instance.
broadcaster: Broadcaster,
/// ID of the node as a validator; `None` if the node is an auditor.
validator_id: Option<ValidatorId>,
/// Current status of the service, as recorded in the dispatcher schema.
status: InstanceStatus,
}
impl<'a> AfterCommitContext<'a> {
    /// Creates a new `AfterCommit` context.
    ///
    /// # Panics
    ///
    /// Panics if the dispatcher schema has no state for `instance`, or if the state has
    /// no status; both cases indicate a bug in the runtime.
    pub(crate) fn new(
        mailbox: &'a mut Mailbox,
        instance: InstanceDescriptor,
        snapshot: &'a dyn Snapshot,
        service_keypair: &'a KeyPair,
        tx_sender: &'a ApiSender,
        validator_id: Option<ValidatorId>,
    ) -> Self {
        let status = snapshot
            .for_dispatcher()
            .get_instance(instance.id)
            .unwrap_or_else(|| {
                panic!("BUG: Cannot find instance state for service `{}`", instance);
            })
            .status
            .expect("BUG: status for a service receiving `after_commit` hook cannot be `None`");
        Self {
            mailbox,
            snapshot,
            validator_id,
            broadcaster: Broadcaster::new(instance, service_keypair.clone(), tx_sender.clone()),
            status,
        }
    }

    /// Returns blockchain data for the snapshot associated with this context.
    pub fn data(&self) -> BlockchainData<&'a dyn Snapshot> {
        BlockchainData::new(self.snapshot, &self.broadcaster.instance().name)
    }

    /// Returns snapshot of the data for the executing service.
    pub fn service_data(&self) -> Prefixed<&'a dyn Snapshot> {
        self.data().for_executing_service()
    }

    /// Returns a current blockchain height. This height is "height of the latest committed block".
    pub fn height(&self) -> Height {
        // TODO Perhaps we should optimize this method [ECR-3222]
        self.data().for_core().height()
    }

    /// Returns the service key of this node.
    pub fn service_key(&self) -> PublicKey {
        self.broadcaster.service_keypair.public_key()
    }

    /// Returns the ID of this node as a validator. If the node is not a validator, returns `None`.
    pub fn validator_id(&self) -> Option<ValidatorId> {
        self.validator_id
    }

    /// Returns the current status of the service.
    pub fn status(&self) -> &InstanceStatus {
        &self.status
    }

    /// Returns a transaction broadcaster if the current node is a validator and the service
    /// is active (i.e., can process transactions). If these conditions do not hold, returns `None`.
    pub fn broadcaster(&self) -> Option<Broadcaster> {
        // A non-validator node cannot broadcast transactions.
        self.validator_id?;
        if self.status.is_active() {
            Some(self.broadcaster.clone())
        } else {
            None
        }
    }

    /// Returns a transaction broadcaster regardless of the node status (validator or auditor)
    /// and the service status (active or not).
    ///
    /// # Safety
    ///
    /// Transactions for non-active services will not be broadcast successfully; they will be
    /// filtered on the receiving nodes as ones that cannot (currently) be processed.
    pub fn generic_broadcaster(&self) -> Broadcaster {
        self.broadcaster.clone()
    }

    /// Provides a privileged interface to the supervisor service.
    ///
    /// `None` will be returned if the caller is not a supervisor.
    #[doc(hidden)]
    pub fn supervisor_extensions(&mut self) -> Option<SupervisorExtensions<'_>> {
        if !is_supervisor(self.broadcaster.instance().id) {
            return None;
        }
        Some(SupervisorExtensions {
            mailbox: &mut *self.mailbox,
        })
    }
}
/// Transaction broadcaster.
///
/// Transaction broadcast allows a service to create transactions in the `after_commit`
/// handler or the HTTP API handlers and broadcast them to the connected Exonum nodes.
/// The transactions are addressed to the executing service instance and are signed
/// by the service keypair of the node.
///
/// Broadcasting functionality is primarily useful for services that receive information
/// from outside the blockchain and need to translate it to transactions. As an example,
/// a time oracle service may broadcast local node time and build the blockchain-wide time
/// by processing corresponding transactions.
///
/// # Examples
///
/// Using `Broadcaster` in service logic:
///
/// ```
/// # use exonum_derive::*;
/// use exonum::runtime::{ExecutionContext, ExecutionError};
/// use exonum_rust_runtime::{AfterCommitContext, Service};
///
/// #[exonum_interface]
/// trait MyInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn publish_string(&self, ctx: Ctx, value: String) -> Self::Output;
/// }
///
/// #[derive(Debug, ServiceDispatcher, ServiceFactory)]
/// #[service_dispatcher(implements("MyInterface"))]
/// struct MyService;
///
/// impl MyInterface<ExecutionContext<'_>> for MyService {
/// // implementation skipped...
/// # type Output = Result<(), ExecutionError>;
/// # fn publish_string(&self, ctx: ExecutionContext<'_>, value: String) -> Self::Output {
/// # Ok(())
/// # }
/// }
///
/// impl Service for MyService {
/// fn after_commit(&self, ctx: AfterCommitContext<'_>) {
/// if let Some(broadcaster) = ctx.broadcaster() {
/// // Broadcast a `do_something` transaction with
/// // the specified payload. We swallow an error in this case
/// // (in a more thorough setup, it could be logged).
/// broadcaster.blocking().publish_string((), "!".to_owned()).ok();
/// }
/// }
/// }
/// ```
#[derive(Debug, Clone)]
pub struct Broadcaster {
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
}
impl Broadcaster {
/// Creates a new broadcaster.
pub(super) fn new(
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
) -> Self {
Self {
instance,
service_keypair,
tx_sender,
}
}
/// Returns a synchronous broadcaster that blocks the current thread to broadcast transaction.
pub fn blocking(self) -> BlockingBroadcaster {
BlockingBroadcaster(self)
}
pub(super) fn keypair(&self) -> &KeyPair {
&self.service_keypair
}
pub(super) fn instance(&self) -> &InstanceDescriptor {
&self.instance
}
}
/// Signs and asynchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for Broadcaster {
type Output = BoxFuture<'static, Result<Hash, SendError>>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
let msg = self
.service_keypair
.clone()
.generic_call(self.instance().id, method, args);
let tx_hash = msg.object_hash();
let tx_sender = self.tx_sender.clone();
async move {
tx_sender.broadcast_transaction(msg).await?;
Ok(tx_hash)
}
.boxed()
}
}
/// A wrapper around the [`Broadcaster`] to broadcast transactions synchronously.
///
/// [`Broadcaster`]: struct.Broadcaster.html
#[derive(Debug, Clone)]
pub struct BlockingBroadcaster(Broadcaster);
/// Signs and synchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for BlockingBroadcaster {
type Output = Result<Hash, SendError>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
block_on(self.0.generic_call((), method, args))
}
}
/// Extended blockchain interface for the service instance authorized as a supervisor.
#[derive(Debug)]
pub struct SupervisorExtensions<'a> {
mailbox: &'a mut Mailbox,
}
impl SupervisorExtensions<'_> {
/// Starts the deployment of an artifact. The provided callback is executed after
/// the deployment is completed.
pub fn start_deploy(
&mut self,
artifact: ArtifactId,
spec: impl BinaryValue,
then: impl FnOnce(Result<(), ExecutionError>) -> Result<(), ExecutionError> + Send +'static,
) {
let action = DispatcherAction::StartDeploy {
artifact,
spec: spec.into_bytes(),
then: Box::new(then),
};
self.mailbox.push(action);
}
}
impl Debug for AfterCommitContext<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AfterCommitContext")
.field("instance", &self.broadcaster.instance)
.finish()
}
}
fn is_supervisor(instance_id: InstanceId) -> bool {
instance_id == exonum::runtime::SUPERVISOR_INSTANCE_ID
}
|
{
Some(self.broadcaster.clone())
}
|
conditional_block
|
service.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
blockchain::{config::InstanceInitParams, ApiSender, SendError},
crypto::{Hash, KeyPair, PublicKey},
helpers::{Height, ValidatorId},
merkledb::{access::Prefixed, BinaryValue, ObjectHash, Snapshot},
runtime::{
ArtifactId, BlockchainData, DispatcherAction, ExecutionContext, ExecutionError,
InstanceDescriptor, InstanceId, InstanceStatus, Mailbox, MethodId, SnapshotExt,
},
};
use futures::{
executor::block_on,
future::{BoxFuture, FutureExt},
};
use std::fmt::{self, Debug};
use super::{api::ServiceApiBuilder, ArtifactProtobufSpec, GenericCall, MethodDescriptor};
/// Describes how the service instance should dispatch specific method calls
/// with consideration of the interface where the method belongs.
///
/// Usually, `ServiceDispatcher` can be derived using the
/// [`ServiceDispatcher`](index.html#examples) macro.
pub trait ServiceDispatcher: Send {
/// Dispatches the interface method call within the specified context.
fn call(
&self,
context: ExecutionContext<'_>,
method: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError>;
}
/// Describes an Exonum service instance.
///
/// `Service` determines how a service instance responds to certain requests and events
/// from the runtime.
///
/// # Implementation Requirements
///
/// Any changes of the storage state in the methods that can perform such changes (i.e., methods
/// receiving `ExecutionContext`) must be the same for all nodes in the blockchain network.
/// In other words, the service should only use data available in the provided context to perform
/// such changes.
pub trait Service: ServiceDispatcher + Debug +'static {
/// Initializes a new service instance with the given parameters. This method is called once
/// after creating a new service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
fn initialize(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Resumes a previously stopped service instance with given parameters. This method
/// is called once after restarting a service instance.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// The parameters passed to the method are not saved by the framework
/// automatically, hence the user must do it manually, if needed.
///
/// [Migration workflow] guarantees that the data layout is supported by the resumed
/// service version.
///
/// [Migration workflow]: https://exonum.com/doc/version/latest/architecture/services/#data-migrations
fn resume(
&self,
_context: ExecutionContext<'_>,
_params: Vec<u8>,
) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service before processing any transaction
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Services should not rely on a particular ordering of `Service::before_transactions`
/// invocations among services.
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
/// Performs storage operations on behalf of the service after processing all transactions
/// in the block.
///
/// The default implementation does nothing and returns `Ok(())`.
///
/// Note that if service was added in the genesis block, it will be activated immediately and
/// thus `after_transactions` will be invoked for such a service after the genesis block creation.
/// If you aren't interested in the processing of for the genesis block, you can use
/// [`ExecutionContext::in_genesis_block`] method and exit early if `true` is returned.
///
/// Invocation of the `height()` method of the core blockchain schema will **panic**
/// if invoked within `after_transactions` of the genesis block. If you are going
/// to process the genesis block and need to know current height, use the `next_height()` method
/// to infer the current blockchain height.
///
/// Services should not rely on a particular ordering of `Service::after_transactions`
/// invocations among services.
///
/// [`ExecutionContext::in_genesis_block`]: struct.ExecutionContext.html#method.in_genesis_block
fn
|
(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
/// Handles block commit event.
///
/// This handler is a callback which is invoked by the blockchain
/// after each block commit. For example, a service can broadcast one or more transactions
/// if a specific condition has occurred.
///
/// The default implementation does nothing.
///
/// Try not to perform long operations in this handler since it is executed
/// on the consensus thread.
fn after_commit(&self, _context: AfterCommitContext<'_>) {}
/// Attaches the request handlers of the service API to the Exonum API schema.
///
/// The default implementation does nothing (i.e., does not provide any API for the service).
///
/// The request handlers are mounted on the `/api/services/{instance_name}` path at the
/// listen address of every full node in the blockchain network.
fn wire_api(&self, _builder: &mut ServiceApiBuilder) {}
}
/// Describes a service instance factory for the specific Rust artifact.
///
/// Usually, `ServiceFactory` can be derived using the
/// [`ServiceFactory`](index.html#examples) macro.
pub trait ServiceFactory: Send + Debug +'static {
/// Returns the unique artifact identifier corresponding to the factory.
fn artifact_id(&self) -> ArtifactId;
/// Returns the Protobuf specification used by the instances of this service.
fn artifact_protobuf_spec(&self) -> ArtifactProtobufSpec;
/// Creates a new service instance.
fn create_instance(&self) -> Box<dyn Service>;
}
#[allow(clippy::use_self)] // false positive
impl<T> From<T> for Box<dyn ServiceFactory>
where
T: ServiceFactory,
{
fn from(factory: T) -> Self {
Box::new(factory) as Self
}
}
/// Provides default instance configuration parameters for `ServiceFactory`.
pub trait DefaultInstance: ServiceFactory {
/// Default id for a service.
const INSTANCE_ID: InstanceId;
/// Default name for a service.
const INSTANCE_NAME: &'static str;
/// Creates default instance configuration parameters for the service.
fn default_instance(&self) -> InstanceInitParams {
self.artifact_id()
.into_default_instance(Self::INSTANCE_ID, Self::INSTANCE_NAME)
}
}
/// Provides context for the `after_commit` handler.
pub struct AfterCommitContext<'a> {
/// Reference to the dispatcher mailbox.
mailbox: &'a mut Mailbox,
/// Read-only snapshot of the current blockchain state.
snapshot: &'a dyn Snapshot,
/// Transaction broadcaster.
broadcaster: Broadcaster,
/// ID of the node as a validator.
validator_id: Option<ValidatorId>,
/// Current status of the service.
status: InstanceStatus,
}
impl<'a> AfterCommitContext<'a> {
/// Creates a new `AfterCommit` context.
pub(crate) fn new(
mailbox: &'a mut Mailbox,
instance: InstanceDescriptor,
snapshot: &'a dyn Snapshot,
service_keypair: &'a KeyPair,
tx_sender: &'a ApiSender,
validator_id: Option<ValidatorId>,
) -> Self {
let status = snapshot
.for_dispatcher()
.get_instance(instance.id)
.unwrap_or_else(|| {
panic!("BUG: Cannot find instance state for service `{}`", instance);
})
.status
.expect("BUG: status for a service receiving `after_commit` hook cannot be `None`");
Self {
mailbox,
snapshot,
validator_id,
broadcaster: Broadcaster::new(instance, service_keypair.clone(), tx_sender.clone()),
status,
}
}
/// Returns blockchain data for the snapshot associated with this context.
pub fn data(&self) -> BlockchainData<&'a dyn Snapshot> {
BlockchainData::new(self.snapshot, &self.broadcaster.instance().name)
}
/// Returns snapshot of the data for the executing service.
pub fn service_data(&self) -> Prefixed<&'a dyn Snapshot> {
self.data().for_executing_service()
}
/// Returns a current blockchain height. This height is "height of the latest committed block".
pub fn height(&self) -> Height {
// TODO Perhaps we should optimize this method [ECR-3222]
self.data().for_core().height()
}
/// Returns the service key of this node.
pub fn service_key(&self) -> PublicKey {
self.broadcaster.service_keypair.public_key()
}
/// Returns the ID of this node as a validator. If the node is not a validator, returns `None`.
pub fn validator_id(&self) -> Option<ValidatorId> {
self.validator_id
}
/// Returns the current status of the service.
pub fn status(&self) -> &InstanceStatus {
&self.status
}
/// Returns a transaction broadcaster if the current node is a validator and the service
/// is active (i.e., can process transactions). If these conditions do not hold, returns `None`.
pub fn broadcaster(&self) -> Option<Broadcaster> {
self.validator_id?;
if self.status.is_active() {
Some(self.broadcaster.clone())
} else {
None
}
}
/// Returns a transaction broadcaster regardless of the node status (validator or auditor)
/// and the service status (active or not).
///
/// # Safety
///
/// Transactions for non-active services will not be broadcast successfully; they will be
/// filtered on the receiving nodes as ones that cannot (currently) be processed.
pub fn generic_broadcaster(&self) -> Broadcaster {
self.broadcaster.clone()
}
/// Provides a privileged interface to the supervisor service.
///
/// `None` will be returned if the caller is not a supervisor.
#[doc(hidden)]
pub fn supervisor_extensions(&mut self) -> Option<SupervisorExtensions<'_>> {
if!is_supervisor(self.broadcaster.instance().id) {
return None;
}
Some(SupervisorExtensions {
mailbox: &mut *self.mailbox,
})
}
}
/// Transaction broadcaster.
///
/// Transaction broadcast allows a service to create transactions in the `after_commit`
/// handler or the HTTP API handlers and broadcast them to the connected Exonum nodes.
/// The transactions are addressed to the executing service instance and are signed
/// by the service keypair of the node.
///
/// Broadcasting functionality is primarily useful for services that receive information
/// from outside the blockchain and need to translate it to transactions. As an example,
/// a time oracle service may broadcast local node time and build the blockchain-wide time
/// by processing corresponding transactions.
///
/// # Examples
///
/// Using `Broadcaster` in service logic:
///
/// ```
/// # use exonum_derive::*;
/// use exonum::runtime::{ExecutionContext, ExecutionError};
/// use exonum_rust_runtime::{AfterCommitContext, Service};
///
/// #[exonum_interface]
/// trait MyInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn publish_string(&self, ctx: Ctx, value: String) -> Self::Output;
/// }
///
/// #[derive(Debug, ServiceDispatcher, ServiceFactory)]
/// #[service_dispatcher(implements("MyInterface"))]
/// struct MyService;
///
/// impl MyInterface<ExecutionContext<'_>> for MyService {
/// // implementation skipped...
/// # type Output = Result<(), ExecutionError>;
/// # fn publish_string(&self, ctx: ExecutionContext<'_>, value: String) -> Self::Output {
/// # Ok(())
/// # }
/// }
///
/// impl Service for MyService {
/// fn after_commit(&self, ctx: AfterCommitContext<'_>) {
/// if let Some(broadcaster) = ctx.broadcaster() {
/// // Broadcast a `do_something` transaction with
/// // the specified payload. We swallow an error in this case
/// // (in a more thorough setup, it could be logged).
/// broadcaster.blocking().publish_string((), "!".to_owned()).ok();
/// }
/// }
/// }
/// ```
#[derive(Debug, Clone)]
pub struct Broadcaster {
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
}
impl Broadcaster {
/// Creates a new broadcaster.
pub(super) fn new(
instance: InstanceDescriptor,
service_keypair: KeyPair,
tx_sender: ApiSender,
) -> Self {
Self {
instance,
service_keypair,
tx_sender,
}
}
/// Returns a synchronous broadcaster that blocks the current thread to broadcast transaction.
pub fn blocking(self) -> BlockingBroadcaster {
BlockingBroadcaster(self)
}
pub(super) fn keypair(&self) -> &KeyPair {
&self.service_keypair
}
pub(super) fn instance(&self) -> &InstanceDescriptor {
&self.instance
}
}
/// Signs and asynchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for Broadcaster {
type Output = BoxFuture<'static, Result<Hash, SendError>>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
let msg = self
.service_keypair
.clone()
.generic_call(self.instance().id, method, args);
let tx_hash = msg.object_hash();
let tx_sender = self.tx_sender.clone();
async move {
tx_sender.broadcast_transaction(msg).await?;
Ok(tx_hash)
}
.boxed()
}
}
/// A wrapper around the [`Broadcaster`] to broadcast transactions synchronously.
///
/// [`Broadcaster`]: struct.Broadcaster.html
#[derive(Debug, Clone)]
pub struct BlockingBroadcaster(Broadcaster);
/// Signs and synchronously broadcasts a transaction to the other nodes in the network.
///
/// The transaction is signed by the service keypair of the node. The same input transaction
/// will lead to the identical transaction being broadcast. If this is undesired, add a nonce
/// field to the input transaction (e.g., a `u64`) and change it between the calls.
///
/// # Return value
///
/// Returns the hash of the created transaction, or an error if the transaction cannot be
/// broadcast. An error means that the node is being shut down.
impl GenericCall<()> for BlockingBroadcaster {
type Output = Result<Hash, SendError>;
fn generic_call(&self, _ctx: (), method: MethodDescriptor<'_>, args: Vec<u8>) -> Self::Output {
block_on(self.0.generic_call((), method, args))
}
}
/// Extended blockchain interface for the service instance authorized as a supervisor.
#[derive(Debug)]
pub struct SupervisorExtensions<'a> {
mailbox: &'a mut Mailbox,
}
impl SupervisorExtensions<'_> {
/// Starts the deployment of an artifact. The provided callback is executed after
/// the deployment is completed.
pub fn start_deploy(
&mut self,
artifact: ArtifactId,
spec: impl BinaryValue,
then: impl FnOnce(Result<(), ExecutionError>) -> Result<(), ExecutionError> + Send +'static,
) {
let action = DispatcherAction::StartDeploy {
artifact,
spec: spec.into_bytes(),
then: Box::new(then),
};
self.mailbox.push(action);
}
}
impl Debug for AfterCommitContext<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AfterCommitContext")
.field("instance", &self.broadcaster.instance)
.finish()
}
}
fn is_supervisor(instance_id: InstanceId) -> bool {
instance_id == exonum::runtime::SUPERVISOR_INSTANCE_ID
}
|
after_transactions
|
identifier_name
|
test.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Various utility functions useful for writing I/O tests */
#![macro_escape]
use libc;
use os;
use prelude::*;
use std::io::net::ip::*;
use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
macro_rules! iotest (
{ fn $name:ident() $b:block $(#[$a:meta])* } => (
mod $name {
#![allow(unused_imports)]
use super::super::*;
use super::*;
use io;
use prelude::*;
use io::*;
use io::fs::*;
use io::test::*;
use io::net::tcp::*;
use io::net::ip::*;
use io::net::udp::*;
#[cfg(unix)]
use io::net::unix::*;
use io::timer::*;
use io::process::*;
use unstable::running_on_valgrind;
use str;
fn f() $b
$(#[$a])* #[test] fn green() { f() }
$(#[$a])* #[test] fn native() {
use native;
let (tx, rx) = channel();
native::task::spawn(proc() { tx.send(f()) });
rx.recv();
}
}
)
)
/// Get a port number, starting at 9600, for use in tests
pub fn next_test_port() -> u16 {
static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
unsafe {
base_port() + next_offset.fetch_add(1, Relaxed) as u16
}
}
/// Get a temporary path which could be the location of a unix socket
pub fn next_test_unix() -> Path {
static mut COUNT: AtomicUint = INIT_ATOMIC_UINT;
// base port and pid are an attempt to be unique between multiple
// test-runners of different configurations running on one
// buildbot, the count is to be unique within this executable.
let string = format!("rust-test-unix-path-{}-{}-{}",
base_port(),
unsafe {libc::getpid()},
unsafe {COUNT.fetch_add(1, Relaxed)});
if cfg!(unix) {
os::tmpdir().join(string)
} else {
Path::new(r"\\.\pipe\" + string)
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> u16 {
let base = 9600u16;
let range = 1000u16;
let bases = [
("32-opt", base + range * 1),
("32-nopt", base + range * 2),
("64-opt", base + range * 3),
("64-nopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Raises the file descriptor limit when running tests if necessary
pub fn raise_fd_limit() {
unsafe { darwin_fd_limit::raise_fd_limit() }
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
|
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::mut_null;
use mem::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int,..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(&mut mib[0], 2, &mut maxfiles as *mut libc::c_int as *mut libc::c_void, &mut size,
mut_null(), 0)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling sysctl: {}", err);
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, &mut rlim)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling getrlimit: {}", err);
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, &rlim)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling setrlimit: {}", err);
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}
|
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
|
random_line_split
|
test.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Various utility functions useful for writing I/O tests */
#![macro_escape]
use libc;
use os;
use prelude::*;
use std::io::net::ip::*;
use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
macro_rules! iotest (
{ fn $name:ident() $b:block $(#[$a:meta])* } => (
mod $name {
#![allow(unused_imports)]
use super::super::*;
use super::*;
use io;
use prelude::*;
use io::*;
use io::fs::*;
use io::test::*;
use io::net::tcp::*;
use io::net::ip::*;
use io::net::udp::*;
#[cfg(unix)]
use io::net::unix::*;
use io::timer::*;
use io::process::*;
use unstable::running_on_valgrind;
use str;
fn f() $b
$(#[$a])* #[test] fn green() { f() }
$(#[$a])* #[test] fn native() {
use native;
let (tx, rx) = channel();
native::task::spawn(proc() { tx.send(f()) });
rx.recv();
}
}
)
)
/// Get a port number, starting at 9600, for use in tests
pub fn next_test_port() -> u16 {
static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
unsafe {
base_port() + next_offset.fetch_add(1, Relaxed) as u16
}
}
/// Get a temporary path which could be the location of a unix socket
pub fn next_test_unix() -> Path {
static mut COUNT: AtomicUint = INIT_ATOMIC_UINT;
// base port and pid are an attempt to be unique between multiple
// test-runners of different configurations running on one
// buildbot, the count is to be unique within this executable.
let string = format!("rust-test-unix-path-{}-{}-{}",
base_port(),
unsafe {libc::getpid()},
unsafe {COUNT.fetch_add(1, Relaxed)});
if cfg!(unix)
|
else {
Path::new(r"\\.\pipe\" + string)
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> u16 {
let base = 9600u16;
let range = 1000u16;
let bases = [
("32-opt", base + range * 1),
("32-nopt", base + range * 2),
("64-opt", base + range * 3),
("64-nopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Raises the file descriptor limit when running tests if necessary
pub fn raise_fd_limit() {
unsafe { darwin_fd_limit::raise_fd_limit() }
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::mut_null;
use mem::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int,..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(&mut mib[0], 2, &mut maxfiles as *mut libc::c_int as *mut libc::c_void, &mut size,
mut_null(), 0)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling sysctl: {}", err);
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, &mut rlim)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling getrlimit: {}", err);
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, &rlim)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling setrlimit: {}", err);
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}
|
{
os::tmpdir().join(string)
}
|
conditional_block
|
test.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Various utility functions useful for writing I/O tests */
#![macro_escape]
use libc;
use os;
use prelude::*;
use std::io::net::ip::*;
use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
macro_rules! iotest (
{ fn $name:ident() $b:block $(#[$a:meta])* } => (
mod $name {
#![allow(unused_imports)]
use super::super::*;
use super::*;
use io;
use prelude::*;
use io::*;
use io::fs::*;
use io::test::*;
use io::net::tcp::*;
use io::net::ip::*;
use io::net::udp::*;
#[cfg(unix)]
use io::net::unix::*;
use io::timer::*;
use io::process::*;
use unstable::running_on_valgrind;
use str;
fn f() $b
$(#[$a])* #[test] fn green() { f() }
$(#[$a])* #[test] fn native() {
use native;
let (tx, rx) = channel();
native::task::spawn(proc() { tx.send(f()) });
rx.recv();
}
}
)
)
/// Get a port number, starting at 9600, for use in tests
pub fn
|
() -> u16 {
static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
unsafe {
base_port() + next_offset.fetch_add(1, Relaxed) as u16
}
}
/// Get a temporary path which could be the location of a unix socket
pub fn next_test_unix() -> Path {
static mut COUNT: AtomicUint = INIT_ATOMIC_UINT;
// base port and pid are an attempt to be unique between multiple
// test-runners of different configurations running on one
// buildbot, the count is to be unique within this executable.
let string = format!("rust-test-unix-path-{}-{}-{}",
base_port(),
unsafe {libc::getpid()},
unsafe {COUNT.fetch_add(1, Relaxed)});
if cfg!(unix) {
os::tmpdir().join(string)
} else {
Path::new(r"\\.\pipe\" + string)
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> u16 {
let base = 9600u16;
let range = 1000u16;
let bases = [
("32-opt", base + range * 1),
("32-nopt", base + range * 2),
("64-opt", base + range * 3),
("64-nopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Raises the file descriptor limit when running tests if necessary
pub fn raise_fd_limit() {
unsafe { darwin_fd_limit::raise_fd_limit() }
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::mut_null;
use mem::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int,..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(&mut mib[0], 2, &mut maxfiles as *mut libc::c_int as *mut libc::c_void, &mut size,
mut_null(), 0)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling sysctl: {}", err);
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, &mut rlim)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling getrlimit: {}", err);
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, &rlim)!= 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling setrlimit: {}", err);
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}
|
next_test_port
|
identifier_name
|
foreign-call-no-runtime.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate libc;
use std::cast;
use std::unstable::run_in_bare_thread;
#[link(name = "rustrt")]
extern {
fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t),
data: libc::uintptr_t) -> libc::uintptr_t;
}
pub fn main() {
unsafe {
run_in_bare_thread(proc() {
let i = &100;
rust_dbg_call(callback, cast::transmute(i));
});
}
}
extern fn
|
(data: libc::uintptr_t) {
unsafe {
let data: *int = cast::transmute(data);
assert_eq!(*data, 100);
}
}
|
callback
|
identifier_name
|
foreign-call-no-runtime.rs
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate libc;
use std::cast;
use std::unstable::run_in_bare_thread;
#[link(name = "rustrt")]
extern {
fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t),
data: libc::uintptr_t) -> libc::uintptr_t;
}
pub fn main() {
unsafe {
run_in_bare_thread(proc() {
let i = &100;
rust_dbg_call(callback, cast::transmute(i));
});
}
}
extern fn callback(data: libc::uintptr_t) {
unsafe {
let data: *int = cast::transmute(data);
assert_eq!(*data, 100);
}
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
random_line_split
|
|
foreign-call-no-runtime.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate libc;
use std::cast;
use std::unstable::run_in_bare_thread;
#[link(name = "rustrt")]
extern {
fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t),
data: libc::uintptr_t) -> libc::uintptr_t;
}
pub fn main() {
unsafe {
run_in_bare_thread(proc() {
let i = &100;
rust_dbg_call(callback, cast::transmute(i));
});
}
}
extern fn callback(data: libc::uintptr_t)
|
{
unsafe {
let data: *int = cast::transmute(data);
assert_eq!(*data, 100);
}
}
|
identifier_body
|
|
quote-tokens.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod syntax;
use syntax::ext::base::ExtCtxt;
fn
|
(cx: @ExtCtxt) {
let e_toks : ~[syntax::ast::token_tree] = quote_tokens!(cx, 1 + 2);
let p_toks : ~[syntax::ast::token_tree] = quote_tokens!(cx, (x, 1.. 4, *));
let a: @syntax::ast::expr = quote_expr!(cx, 1 + 2);
let _b: Option<@syntax::ast::item> = quote_item!(cx, static foo : int = $e_toks; );
let _c: @syntax::ast::pat = quote_pat!(cx, (x, 1.. 4, *) );
let _d: @syntax::ast::stmt = quote_stmt!(cx, let x = $a; );
let _e: @syntax::ast::expr = quote_expr!(cx, match foo { $p_toks => 10 } );
}
fn main() {
}
|
syntax_extension
|
identifier_name
|
quote-tokens.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod syntax;
use syntax::ext::base::ExtCtxt;
fn syntax_extension(cx: @ExtCtxt)
|
fn main() {
}
|
{
let e_toks : ~[syntax::ast::token_tree] = quote_tokens!(cx, 1 + 2);
let p_toks : ~[syntax::ast::token_tree] = quote_tokens!(cx, (x, 1 .. 4, *));
let a: @syntax::ast::expr = quote_expr!(cx, 1 + 2);
let _b: Option<@syntax::ast::item> = quote_item!(cx, static foo : int = $e_toks; );
let _c: @syntax::ast::pat = quote_pat!(cx, (x, 1 .. 4, *) );
let _d: @syntax::ast::stmt = quote_stmt!(cx, let x = $a; );
let _e: @syntax::ast::expr = quote_expr!(cx, match foo { $p_toks => 10 } );
}
|
identifier_body
|
quote-tokens.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod syntax;
use syntax::ext::base::ExtCtxt;
|
let e_toks : ~[syntax::ast::token_tree] = quote_tokens!(cx, 1 + 2);
let p_toks : ~[syntax::ast::token_tree] = quote_tokens!(cx, (x, 1.. 4, *));
let a: @syntax::ast::expr = quote_expr!(cx, 1 + 2);
let _b: Option<@syntax::ast::item> = quote_item!(cx, static foo : int = $e_toks; );
let _c: @syntax::ast::pat = quote_pat!(cx, (x, 1.. 4, *) );
let _d: @syntax::ast::stmt = quote_stmt!(cx, let x = $a; );
let _e: @syntax::ast::expr = quote_expr!(cx, match foo { $p_toks => 10 } );
}
fn main() {
}
|
fn syntax_extension(cx: @ExtCtxt) {
|
random_line_split
|
repeater.rs
|
use futures::prelude::*;
use irc::client::prelude::*;
#[tokio::main]
async fn main() -> irc::error::Result<()> {
let config = Config {
nickname: Some("pickles".to_owned()),
server: Some("chat.freenode.net".to_owned()),
channels: vec!["#rust-spam".to_owned()],
burst_window_length: Some(4),
max_messages_in_burst: Some(4),
..Default::default()
};
let mut client = Client::from_config(config).await?;
client.identify()?;
let mut stream = client.stream()?;
let sender = client.sender();
loop {
let message = stream.select_next_some().await?;
if let Command::PRIVMSG(ref target, ref msg) = message.command {
if msg.starts_with(&*client.current_nickname())
|
}
}
}
|
{
let tokens: Vec<_> = msg.split(' ').collect();
if tokens.len() > 2 {
let n = tokens[0].len() + tokens[1].len() + 2;
if let Ok(count) = tokens[1].parse::<u8>() {
for _ in 0..count {
sender.send_privmsg(
message.response_target().unwrap_or(target),
&msg[n..],
)?;
}
}
}
}
|
conditional_block
|
repeater.rs
|
use futures::prelude::*;
use irc::client::prelude::*;
#[tokio::main]
async fn
|
() -> irc::error::Result<()> {
let config = Config {
nickname: Some("pickles".to_owned()),
server: Some("chat.freenode.net".to_owned()),
channels: vec!["#rust-spam".to_owned()],
burst_window_length: Some(4),
max_messages_in_burst: Some(4),
..Default::default()
};
let mut client = Client::from_config(config).await?;
client.identify()?;
let mut stream = client.stream()?;
let sender = client.sender();
loop {
let message = stream.select_next_some().await?;
if let Command::PRIVMSG(ref target, ref msg) = message.command {
if msg.starts_with(&*client.current_nickname()) {
let tokens: Vec<_> = msg.split(' ').collect();
if tokens.len() > 2 {
let n = tokens[0].len() + tokens[1].len() + 2;
if let Ok(count) = tokens[1].parse::<u8>() {
for _ in 0..count {
sender.send_privmsg(
message.response_target().unwrap_or(target),
&msg[n..],
)?;
}
}
}
}
}
}
}
|
main
|
identifier_name
|
repeater.rs
|
use futures::prelude::*;
use irc::client::prelude::*;
#[tokio::main]
async fn main() -> irc::error::Result<()>
|
if msg.starts_with(&*client.current_nickname()) {
let tokens: Vec<_> = msg.split(' ').collect();
if tokens.len() > 2 {
let n = tokens[0].len() + tokens[1].len() + 2;
if let Ok(count) = tokens[1].parse::<u8>() {
for _ in 0..count {
sender.send_privmsg(
message.response_target().unwrap_or(target),
&msg[n..],
)?;
}
}
}
}
}
}
}
|
{
let config = Config {
nickname: Some("pickles".to_owned()),
server: Some("chat.freenode.net".to_owned()),
channels: vec!["#rust-spam".to_owned()],
burst_window_length: Some(4),
max_messages_in_burst: Some(4),
..Default::default()
};
let mut client = Client::from_config(config).await?;
client.identify()?;
let mut stream = client.stream()?;
let sender = client.sender();
loop {
let message = stream.select_next_some().await?;
if let Command::PRIVMSG(ref target, ref msg) = message.command {
|
identifier_body
|
repeater.rs
|
use futures::prelude::*;
use irc::client::prelude::*;
#[tokio::main]
async fn main() -> irc::error::Result<()> {
let config = Config {
nickname: Some("pickles".to_owned()),
server: Some("chat.freenode.net".to_owned()),
channels: vec!["#rust-spam".to_owned()],
burst_window_length: Some(4),
max_messages_in_burst: Some(4),
..Default::default()
};
let mut client = Client::from_config(config).await?;
client.identify()?;
let mut stream = client.stream()?;
let sender = client.sender();
loop {
|
let message = stream.select_next_some().await?;
if let Command::PRIVMSG(ref target, ref msg) = message.command {
if msg.starts_with(&*client.current_nickname()) {
let tokens: Vec<_> = msg.split(' ').collect();
if tokens.len() > 2 {
let n = tokens[0].len() + tokens[1].len() + 2;
if let Ok(count) = tokens[1].parse::<u8>() {
for _ in 0..count {
sender.send_privmsg(
message.response_target().unwrap_or(target),
&msg[n..],
)?;
}
}
}
}
}
}
}
|
random_line_split
|
|
build.rs
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
|
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern crate ar;
use std::{path::PathBuf, process::Command};
use ar::Builder;
use std::fs::File;
fn main() {
let mut out_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
out_dir.push("lib");
if!out_dir.is_dir() {
std::fs::create_dir(&out_dir).unwrap();
}
let obj_file = out_dir.join("test.o");
let lib_file = out_dir.join("libtest.a");
let output = Command::new(concat!(
env!("CARGO_MANIFEST_DIR"),
"/src/build_test_lib.py"
))
.arg(&out_dir)
.output()
.expect("Failed to execute command");
assert!(
obj_file.exists(),
"Could not build tvm lib: {}",
String::from_utf8(output.stderr)
.unwrap()
.trim()
.split("\n")
.last()
.unwrap_or("")
);
let mut builder = Builder::new(File::create(lib_file).unwrap());
builder.append_path(obj_file).unwrap();
println!("cargo:rustc-link-lib=static=test");
println!("cargo:rustc-link-search=native={}", out_dir.display());
}
|
*
|
random_line_split
|
build.rs
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern crate ar;
use std::{path::PathBuf, process::Command};
use ar::Builder;
use std::fs::File;
fn
|
() {
let mut out_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
out_dir.push("lib");
if!out_dir.is_dir() {
std::fs::create_dir(&out_dir).unwrap();
}
let obj_file = out_dir.join("test.o");
let lib_file = out_dir.join("libtest.a");
let output = Command::new(concat!(
env!("CARGO_MANIFEST_DIR"),
"/src/build_test_lib.py"
))
.arg(&out_dir)
.output()
.expect("Failed to execute command");
assert!(
obj_file.exists(),
"Could not build tvm lib: {}",
String::from_utf8(output.stderr)
.unwrap()
.trim()
.split("\n")
.last()
.unwrap_or("")
);
let mut builder = Builder::new(File::create(lib_file).unwrap());
builder.append_path(obj_file).unwrap();
println!("cargo:rustc-link-lib=static=test");
println!("cargo:rustc-link-search=native={}", out_dir.display());
}
|
main
|
identifier_name
|
build.rs
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern crate ar;
use std::{path::PathBuf, process::Command};
use ar::Builder;
use std::fs::File;
fn main()
|
"Could not build tvm lib: {}",
String::from_utf8(output.stderr)
.unwrap()
.trim()
.split("\n")
.last()
.unwrap_or("")
);
let mut builder = Builder::new(File::create(lib_file).unwrap());
builder.append_path(obj_file).unwrap();
println!("cargo:rustc-link-lib=static=test");
println!("cargo:rustc-link-search=native={}", out_dir.display());
}
|
{
let mut out_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
out_dir.push("lib");
if !out_dir.is_dir() {
std::fs::create_dir(&out_dir).unwrap();
}
let obj_file = out_dir.join("test.o");
let lib_file = out_dir.join("libtest.a");
let output = Command::new(concat!(
env!("CARGO_MANIFEST_DIR"),
"/src/build_test_lib.py"
))
.arg(&out_dir)
.output()
.expect("Failed to execute command");
assert!(
obj_file.exists(),
|
identifier_body
|
build.rs
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern crate ar;
use std::{path::PathBuf, process::Command};
use ar::Builder;
use std::fs::File;
fn main() {
let mut out_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
out_dir.push("lib");
if!out_dir.is_dir()
|
let obj_file = out_dir.join("test.o");
let lib_file = out_dir.join("libtest.a");
let output = Command::new(concat!(
env!("CARGO_MANIFEST_DIR"),
"/src/build_test_lib.py"
))
.arg(&out_dir)
.output()
.expect("Failed to execute command");
assert!(
obj_file.exists(),
"Could not build tvm lib: {}",
String::from_utf8(output.stderr)
.unwrap()
.trim()
.split("\n")
.last()
.unwrap_or("")
);
let mut builder = Builder::new(File::create(lib_file).unwrap());
builder.append_path(obj_file).unwrap();
println!("cargo:rustc-link-lib=static=test");
println!("cargo:rustc-link-search=native={}", out_dir.display());
}
|
{
std::fs::create_dir(&out_dir).unwrap();
}
|
conditional_block
|
filesearch.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
pub use self::FileMatch::*;
use std::cell::RefCell;
use std::collections::HashSet;
use std::io::fs::PathExtensions;
use std::io::fs;
use std::os;
use util::fs as myfs;
pub enum FileMatch { FileMatches, FileDoesntMatch }
// A module for searching for libraries
// FIXME (#2658): I'm not happy how this module turned out. Should
// probably just be folded into cstore.
/// Functions with type `pick` take a parent directory as well as
/// a file found in that directory.
pub type pick<'a> = |path: &Path|: 'a -> FileMatch;
pub struct FileSearch<'a> {
pub sysroot: &'a Path,
pub addl_lib_search_paths: &'a RefCell<Vec<Path>>,
pub triple: &'a str,
}
impl<'a> FileSearch<'a> {
pub fn for_each_lib_search_path(&self, f: |&Path| -> FileMatch) {
let mut visited_dirs = HashSet::new();
let mut found = false;
debug!("filesearch: searching additional lib search paths [{}]",
self.addl_lib_search_paths.borrow().len());
for path in self.addl_lib_search_paths.borrow().iter() {
match f(path) {
FileMatches => found = true,
FileDoesntMatch => ()
}
visited_dirs.insert(path.as_vec().to_vec());
}
debug!("filesearch: searching lib path");
let tlib_path = make_target_lib_path(self.sysroot,
self.triple);
if!visited_dirs.contains(tlib_path.as_vec()) {
match f(&tlib_path) {
FileMatches => found = true,
FileDoesntMatch => ()
}
}
visited_dirs.insert(tlib_path.as_vec().to_vec());
// Try RUST_PATH
if!found {
let rustpath = rust_path();
for path in rustpath.iter() {
let tlib_path = make_rustpkg_lib_path(
self.sysroot, path, self.triple);
debug!("is {} in visited_dirs? {}", tlib_path.display(),
visited_dirs.contains(&tlib_path.as_vec().to_vec()));
if!visited_dirs.contains(tlib_path.as_vec()) {
visited_dirs.insert(tlib_path.as_vec().to_vec());
// Don't keep searching the RUST_PATH if one match turns up --
// if we did, we'd get a "multiple matching crates" error
match f(&tlib_path) {
FileMatches => {
break;
}
FileDoesntMatch => ()
}
}
}
}
}
pub fn get_lib_path(&self) -> Path {
make_target_lib_path(self.sysroot, self.triple)
}
pub fn search(&self, pick: pick) {
self.for_each_lib_search_path(|lib_search_path| {
debug!("searching {}", lib_search_path.display());
match fs::readdir(lib_search_path) {
Ok(files) => {
let mut rslt = FileDoesntMatch;
fn is_rlib(p: & &Path) -> bool {
p.extension_str() == Some("rlib")
}
// Reading metadata out of rlibs is faster, and if we find both
// an rlib and a dylib we only read one of the files of
// metadata, so in the name of speed, bring all rlib files to
// the front of the search list.
let files1 = files.iter().filter(|p| is_rlib(p));
let files2 = files.iter().filter(|p|!is_rlib(p));
for path in files1.chain(files2) {
debug!("testing {}", path.display());
let maybe_picked = pick(path);
match maybe_picked {
FileMatches => {
debug!("picked {}", path.display());
rslt = FileMatches;
}
FileDoesntMatch => {
debug!("rejected {}", path.display());
}
}
}
rslt
}
Err(..) => FileDoesntMatch,
}
});
}
pub fn new(sysroot: &'a Path,
triple: &'a str,
addl_lib_search_paths: &'a RefCell<Vec<Path>>) -> FileSearch<'a> {
debug!("using sysroot = {}, triple = {}", sysroot.display(), triple);
FileSearch {
sysroot: sysroot,
addl_lib_search_paths: addl_lib_search_paths,
triple: triple,
}
}
// Returns a list of directories where target-specific dylibs might be located.
pub fn get_dylib_search_paths(&self) -> Vec<Path> {
let mut paths = Vec::new();
self.for_each_lib_search_path(|lib_search_path| {
paths.push(lib_search_path.clone());
FileDoesntMatch
});
paths
}
// Returns a list of directories where target-specific tool binaries are located.
pub fn get_tools_search_paths(&self) -> Vec<Path> {
let mut p = Path::new(self.sysroot);
p.push(find_libdir(self.sysroot));
p.push(rustlibdir());
p.push(self.triple);
p.push("bin");
vec![p]
}
}
pub fn relative_target_lib_path(sysroot: &Path, target_triple: &str) -> Path {
let mut p = Path::new(find_libdir(sysroot));
assert!(p.is_relative());
p.push(rustlibdir());
p.push(target_triple);
p.push("lib");
p
}
fn make_target_lib_path(sysroot: &Path,
target_triple: &str) -> Path {
sysroot.join(&relative_target_lib_path(sysroot, target_triple))
}
fn make_rustpkg_lib_path(sysroot: &Path,
dir: &Path,
triple: &str) -> Path {
let mut p = dir.join(find_libdir(sysroot));
p.push(triple);
p
}
pub fn get_or_default_sysroot() -> Path {
// Follow symlinks. If the resolved path is relative, make it absolute.
fn canonicalize(path: Option<Path>) -> Option<Path> {
path.and_then(|path|
match myfs::realpath(&path) {
Ok(canon) => Some(canon),
Err(e) => panic!("failed to get realpath: {}", e),
})
}
match canonicalize(os::self_exe_name()) {
Some(mut p) => { p.pop(); p.pop(); p }
None => panic!("can't determine value for sysroot")
}
}
#[cfg(windows)]
static PATH_ENTRY_SEPARATOR: &'static str = ";";
#[cfg(not(windows))]
static PATH_ENTRY_SEPARATOR: &'static str = ":";
/// Returns RUST_PATH as a string, without default paths added
pub fn
|
() -> Option<String> {
os::getenv("RUST_PATH").map(|x| x.to_string())
}
/// Returns the value of RUST_PATH, as a list
/// of Paths. Includes default entries for, if they exist:
/// $HOME/.rust
/// DIR/.rust for any DIR that's the current working directory
/// or an ancestor of it
pub fn rust_path() -> Vec<Path> {
let mut env_rust_path: Vec<Path> = match get_rust_path() {
Some(env_path) => {
let env_path_components =
env_path.as_slice().split_str(PATH_ENTRY_SEPARATOR);
env_path_components.map(|s| Path::new(s)).collect()
}
None => Vec::new()
};
let mut cwd = os::getcwd().unwrap();
// now add in default entries
let cwd_dot_rust = cwd.join(".rust");
if!env_rust_path.contains(&cwd_dot_rust) {
env_rust_path.push(cwd_dot_rust);
}
if!env_rust_path.contains(&cwd) {
env_rust_path.push(cwd.clone());
}
loop {
if { let f = cwd.filename(); f.is_none() || f.unwrap() == b".." } {
break
}
cwd.set_filename(".rust");
if!env_rust_path.contains(&cwd) && cwd.exists() {
env_rust_path.push(cwd.clone());
}
cwd.pop();
}
let h = os::homedir();
for h in h.iter() {
let p = h.join(".rust");
if!env_rust_path.contains(&p) && p.exists() {
env_rust_path.push(p);
}
}
env_rust_path
}
// The name of the directory rustc expects libraries to be located.
// On Unix should be "lib", on windows "bin"
#[cfg(unix)]
fn find_libdir(sysroot: &Path) -> String {
// FIXME: This is a quick hack to make the rustc binary able to locate
// Rust libraries in Linux environments where libraries might be installed
// to lib64/lib32. This would be more foolproof by basing the sysroot off
// of the directory where librustc is located, rather than where the rustc
// binary is.
//If --libdir is set during configuration to the value other than
// "lib" (i.e. non-default), this value is used (see issue #16552).
match option_env!("CFG_LIBDIR_RELATIVE") {
Some(libdir) if libdir!= "lib" => return libdir.to_string(),
_ => if sysroot.join(primary_libdir_name()).join(rustlibdir()).exists() {
return primary_libdir_name();
} else {
return secondary_libdir_name();
}
}
#[cfg(target_word_size = "64")]
fn primary_libdir_name() -> String {
"lib64".to_string()
}
#[cfg(target_word_size = "32")]
fn primary_libdir_name() -> String {
"lib32".to_string()
}
fn secondary_libdir_name() -> String {
"lib".to_string()
}
}
#[cfg(windows)]
fn find_libdir(_sysroot: &Path) -> String {
"bin".to_string()
}
// The name of rustc's own place to organize libraries.
// Used to be "rustc", now the default is "rustlib"
pub fn rustlibdir() -> String {
"rustlib".to_string()
}
|
get_rust_path
|
identifier_name
|
filesearch.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
pub use self::FileMatch::*;
use std::cell::RefCell;
use std::collections::HashSet;
use std::io::fs::PathExtensions;
use std::io::fs;
use std::os;
use util::fs as myfs;
pub enum FileMatch { FileMatches, FileDoesntMatch }
// A module for searching for libraries
// FIXME (#2658): I'm not happy how this module turned out. Should
// probably just be folded into cstore.
/// Functions with type `pick` take a parent directory as well as
/// a file found in that directory.
pub type pick<'a> = |path: &Path|: 'a -> FileMatch;
pub struct FileSearch<'a> {
pub sysroot: &'a Path,
pub addl_lib_search_paths: &'a RefCell<Vec<Path>>,
pub triple: &'a str,
}
impl<'a> FileSearch<'a> {
pub fn for_each_lib_search_path(&self, f: |&Path| -> FileMatch) {
let mut visited_dirs = HashSet::new();
let mut found = false;
debug!("filesearch: searching additional lib search paths [{}]",
self.addl_lib_search_paths.borrow().len());
for path in self.addl_lib_search_paths.borrow().iter() {
match f(path) {
FileMatches => found = true,
FileDoesntMatch => ()
}
visited_dirs.insert(path.as_vec().to_vec());
}
debug!("filesearch: searching lib path");
let tlib_path = make_target_lib_path(self.sysroot,
self.triple);
if!visited_dirs.contains(tlib_path.as_vec()) {
match f(&tlib_path) {
FileMatches => found = true,
FileDoesntMatch => ()
}
}
visited_dirs.insert(tlib_path.as_vec().to_vec());
// Try RUST_PATH
if!found {
let rustpath = rust_path();
for path in rustpath.iter() {
let tlib_path = make_rustpkg_lib_path(
self.sysroot, path, self.triple);
debug!("is {} in visited_dirs? {}", tlib_path.display(),
visited_dirs.contains(&tlib_path.as_vec().to_vec()));
if!visited_dirs.contains(tlib_path.as_vec()) {
visited_dirs.insert(tlib_path.as_vec().to_vec());
// Don't keep searching the RUST_PATH if one match turns up --
// if we did, we'd get a "multiple matching crates" error
match f(&tlib_path) {
FileMatches => {
break;
}
FileDoesntMatch => ()
}
}
}
}
}
pub fn get_lib_path(&self) -> Path {
make_target_lib_path(self.sysroot, self.triple)
}
pub fn search(&self, pick: pick) {
self.for_each_lib_search_path(|lib_search_path| {
debug!("searching {}", lib_search_path.display());
match fs::readdir(lib_search_path) {
Ok(files) => {
let mut rslt = FileDoesntMatch;
fn is_rlib(p: & &Path) -> bool {
p.extension_str() == Some("rlib")
}
// Reading metadata out of rlibs is faster, and if we find both
// an rlib and a dylib we only read one of the files of
// metadata, so in the name of speed, bring all rlib files to
// the front of the search list.
let files1 = files.iter().filter(|p| is_rlib(p));
let files2 = files.iter().filter(|p|!is_rlib(p));
for path in files1.chain(files2) {
debug!("testing {}", path.display());
let maybe_picked = pick(path);
match maybe_picked {
FileMatches => {
debug!("picked {}", path.display());
rslt = FileMatches;
}
FileDoesntMatch => {
debug!("rejected {}", path.display());
}
}
}
rslt
}
Err(..) => FileDoesntMatch,
}
});
}
pub fn new(sysroot: &'a Path,
triple: &'a str,
addl_lib_search_paths: &'a RefCell<Vec<Path>>) -> FileSearch<'a> {
debug!("using sysroot = {}, triple = {}", sysroot.display(), triple);
FileSearch {
sysroot: sysroot,
addl_lib_search_paths: addl_lib_search_paths,
triple: triple,
}
}
// Returns a list of directories where target-specific dylibs might be located.
pub fn get_dylib_search_paths(&self) -> Vec<Path> {
let mut paths = Vec::new();
self.for_each_lib_search_path(|lib_search_path| {
paths.push(lib_search_path.clone());
FileDoesntMatch
});
paths
}
// Returns a list of directories where target-specific tool binaries are located.
pub fn get_tools_search_paths(&self) -> Vec<Path> {
let mut p = Path::new(self.sysroot);
p.push(find_libdir(self.sysroot));
p.push(rustlibdir());
p.push(self.triple);
p.push("bin");
vec![p]
}
}
pub fn relative_target_lib_path(sysroot: &Path, target_triple: &str) -> Path {
let mut p = Path::new(find_libdir(sysroot));
assert!(p.is_relative());
p.push(rustlibdir());
p.push(target_triple);
p.push("lib");
p
}
fn make_target_lib_path(sysroot: &Path,
target_triple: &str) -> Path {
sysroot.join(&relative_target_lib_path(sysroot, target_triple))
}
fn make_rustpkg_lib_path(sysroot: &Path,
dir: &Path,
triple: &str) -> Path {
let mut p = dir.join(find_libdir(sysroot));
p.push(triple);
p
}
pub fn get_or_default_sysroot() -> Path {
// Follow symlinks. If the resolved path is relative, make it absolute.
fn canonicalize(path: Option<Path>) -> Option<Path> {
path.and_then(|path|
match myfs::realpath(&path) {
Ok(canon) => Some(canon),
Err(e) => panic!("failed to get realpath: {}", e),
})
}
match canonicalize(os::self_exe_name()) {
Some(mut p) => { p.pop(); p.pop(); p }
None => panic!("can't determine value for sysroot")
}
}
#[cfg(windows)]
static PATH_ENTRY_SEPARATOR: &'static str = ";";
#[cfg(not(windows))]
static PATH_ENTRY_SEPARATOR: &'static str = ":";
/// Returns RUST_PATH as a string, without default paths added
pub fn get_rust_path() -> Option<String> {
os::getenv("RUST_PATH").map(|x| x.to_string())
}
/// Returns the value of RUST_PATH, as a list
/// of Paths. Includes default entries for, if they exist:
/// $HOME/.rust
/// DIR/.rust for any DIR that's the current working directory
/// or an ancestor of it
pub fn rust_path() -> Vec<Path> {
let mut env_rust_path: Vec<Path> = match get_rust_path() {
Some(env_path) => {
let env_path_components =
env_path.as_slice().split_str(PATH_ENTRY_SEPARATOR);
env_path_components.map(|s| Path::new(s)).collect()
}
None => Vec::new()
};
let mut cwd = os::getcwd().unwrap();
// now add in default entries
let cwd_dot_rust = cwd.join(".rust");
if!env_rust_path.contains(&cwd_dot_rust) {
env_rust_path.push(cwd_dot_rust);
}
if!env_rust_path.contains(&cwd) {
env_rust_path.push(cwd.clone());
}
loop {
if { let f = cwd.filename(); f.is_none() || f.unwrap() == b".." } {
break
}
cwd.set_filename(".rust");
if!env_rust_path.contains(&cwd) && cwd.exists() {
env_rust_path.push(cwd.clone());
}
cwd.pop();
}
let h = os::homedir();
for h in h.iter() {
let p = h.join(".rust");
if!env_rust_path.contains(&p) && p.exists() {
env_rust_path.push(p);
}
}
env_rust_path
}
// The name of the directory rustc expects libraries to be located.
// On Unix should be "lib", on windows "bin"
#[cfg(unix)]
fn find_libdir(sysroot: &Path) -> String {
// FIXME: This is a quick hack to make the rustc binary able to locate
// Rust libraries in Linux environments where libraries might be installed
// to lib64/lib32. This would be more foolproof by basing the sysroot off
// of the directory where librustc is located, rather than where the rustc
// binary is.
//If --libdir is set during configuration to the value other than
// "lib" (i.e. non-default), this value is used (see issue #16552).
match option_env!("CFG_LIBDIR_RELATIVE") {
Some(libdir) if libdir!= "lib" => return libdir.to_string(),
_ => if sysroot.join(primary_libdir_name()).join(rustlibdir()).exists() {
return primary_libdir_name();
} else {
return secondary_libdir_name();
}
}
|
fn primary_libdir_name() -> String {
"lib64".to_string()
}
#[cfg(target_word_size = "32")]
fn primary_libdir_name() -> String {
"lib32".to_string()
}
fn secondary_libdir_name() -> String {
"lib".to_string()
}
}
#[cfg(windows)]
fn find_libdir(_sysroot: &Path) -> String {
"bin".to_string()
}
// The name of rustc's own place to organize libraries.
// Used to be "rustc", now the default is "rustlib"
pub fn rustlibdir() -> String {
"rustlib".to_string()
}
|
#[cfg(target_word_size = "64")]
|
random_line_split
|
filesearch.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
pub use self::FileMatch::*;
use std::cell::RefCell;
use std::collections::HashSet;
use std::io::fs::PathExtensions;
use std::io::fs;
use std::os;
use util::fs as myfs;
pub enum FileMatch { FileMatches, FileDoesntMatch }
// A module for searching for libraries
// FIXME (#2658): I'm not happy how this module turned out. Should
// probably just be folded into cstore.
/// Functions with type `pick` take a parent directory as well as
/// a file found in that directory.
pub type pick<'a> = |path: &Path|: 'a -> FileMatch;
pub struct FileSearch<'a> {
pub sysroot: &'a Path,
pub addl_lib_search_paths: &'a RefCell<Vec<Path>>,
pub triple: &'a str,
}
impl<'a> FileSearch<'a> {
pub fn for_each_lib_search_path(&self, f: |&Path| -> FileMatch) {
let mut visited_dirs = HashSet::new();
let mut found = false;
debug!("filesearch: searching additional lib search paths [{}]",
self.addl_lib_search_paths.borrow().len());
for path in self.addl_lib_search_paths.borrow().iter() {
match f(path) {
FileMatches => found = true,
FileDoesntMatch => ()
}
visited_dirs.insert(path.as_vec().to_vec());
}
debug!("filesearch: searching lib path");
let tlib_path = make_target_lib_path(self.sysroot,
self.triple);
if!visited_dirs.contains(tlib_path.as_vec()) {
match f(&tlib_path) {
FileMatches => found = true,
FileDoesntMatch => ()
}
}
visited_dirs.insert(tlib_path.as_vec().to_vec());
// Try RUST_PATH
if!found {
let rustpath = rust_path();
for path in rustpath.iter() {
let tlib_path = make_rustpkg_lib_path(
self.sysroot, path, self.triple);
debug!("is {} in visited_dirs? {}", tlib_path.display(),
visited_dirs.contains(&tlib_path.as_vec().to_vec()));
if!visited_dirs.contains(tlib_path.as_vec()) {
visited_dirs.insert(tlib_path.as_vec().to_vec());
// Don't keep searching the RUST_PATH if one match turns up --
// if we did, we'd get a "multiple matching crates" error
match f(&tlib_path) {
FileMatches => {
break;
}
FileDoesntMatch => ()
}
}
}
}
}
pub fn get_lib_path(&self) -> Path {
make_target_lib_path(self.sysroot, self.triple)
}
pub fn search(&self, pick: pick) {
self.for_each_lib_search_path(|lib_search_path| {
debug!("searching {}", lib_search_path.display());
match fs::readdir(lib_search_path) {
Ok(files) => {
let mut rslt = FileDoesntMatch;
fn is_rlib(p: & &Path) -> bool {
p.extension_str() == Some("rlib")
}
// Reading metadata out of rlibs is faster, and if we find both
// an rlib and a dylib we only read one of the files of
// metadata, so in the name of speed, bring all rlib files to
// the front of the search list.
let files1 = files.iter().filter(|p| is_rlib(p));
let files2 = files.iter().filter(|p|!is_rlib(p));
for path in files1.chain(files2) {
debug!("testing {}", path.display());
let maybe_picked = pick(path);
match maybe_picked {
FileMatches => {
debug!("picked {}", path.display());
rslt = FileMatches;
}
FileDoesntMatch => {
debug!("rejected {}", path.display());
}
}
}
rslt
}
Err(..) => FileDoesntMatch,
}
});
}
pub fn new(sysroot: &'a Path,
triple: &'a str,
addl_lib_search_paths: &'a RefCell<Vec<Path>>) -> FileSearch<'a> {
debug!("using sysroot = {}, triple = {}", sysroot.display(), triple);
FileSearch {
sysroot: sysroot,
addl_lib_search_paths: addl_lib_search_paths,
triple: triple,
}
}
// Returns a list of directories where target-specific dylibs might be located.
pub fn get_dylib_search_paths(&self) -> Vec<Path> {
let mut paths = Vec::new();
self.for_each_lib_search_path(|lib_search_path| {
paths.push(lib_search_path.clone());
FileDoesntMatch
});
paths
}
// Returns a list of directories where target-specific tool binaries are located.
pub fn get_tools_search_paths(&self) -> Vec<Path> {
let mut p = Path::new(self.sysroot);
p.push(find_libdir(self.sysroot));
p.push(rustlibdir());
p.push(self.triple);
p.push("bin");
vec![p]
}
}
pub fn relative_target_lib_path(sysroot: &Path, target_triple: &str) -> Path {
let mut p = Path::new(find_libdir(sysroot));
assert!(p.is_relative());
p.push(rustlibdir());
p.push(target_triple);
p.push("lib");
p
}
fn make_target_lib_path(sysroot: &Path,
target_triple: &str) -> Path
|
fn make_rustpkg_lib_path(sysroot: &Path,
dir: &Path,
triple: &str) -> Path {
let mut p = dir.join(find_libdir(sysroot));
p.push(triple);
p
}
pub fn get_or_default_sysroot() -> Path {
// Follow symlinks. If the resolved path is relative, make it absolute.
fn canonicalize(path: Option<Path>) -> Option<Path> {
path.and_then(|path|
match myfs::realpath(&path) {
Ok(canon) => Some(canon),
Err(e) => panic!("failed to get realpath: {}", e),
})
}
match canonicalize(os::self_exe_name()) {
Some(mut p) => { p.pop(); p.pop(); p }
None => panic!("can't determine value for sysroot")
}
}
#[cfg(windows)]
static PATH_ENTRY_SEPARATOR: &'static str = ";";
#[cfg(not(windows))]
static PATH_ENTRY_SEPARATOR: &'static str = ":";
/// Returns RUST_PATH as a string, without default paths added
pub fn get_rust_path() -> Option<String> {
os::getenv("RUST_PATH").map(|x| x.to_string())
}
/// Returns the value of RUST_PATH, as a list
/// of Paths. Includes default entries for, if they exist:
/// $HOME/.rust
/// DIR/.rust for any DIR that's the current working directory
/// or an ancestor of it
pub fn rust_path() -> Vec<Path> {
let mut env_rust_path: Vec<Path> = match get_rust_path() {
Some(env_path) => {
let env_path_components =
env_path.as_slice().split_str(PATH_ENTRY_SEPARATOR);
env_path_components.map(|s| Path::new(s)).collect()
}
None => Vec::new()
};
let mut cwd = os::getcwd().unwrap();
// now add in default entries
let cwd_dot_rust = cwd.join(".rust");
if!env_rust_path.contains(&cwd_dot_rust) {
env_rust_path.push(cwd_dot_rust);
}
if!env_rust_path.contains(&cwd) {
env_rust_path.push(cwd.clone());
}
loop {
if { let f = cwd.filename(); f.is_none() || f.unwrap() == b".." } {
break
}
cwd.set_filename(".rust");
if!env_rust_path.contains(&cwd) && cwd.exists() {
env_rust_path.push(cwd.clone());
}
cwd.pop();
}
let h = os::homedir();
for h in h.iter() {
let p = h.join(".rust");
if!env_rust_path.contains(&p) && p.exists() {
env_rust_path.push(p);
}
}
env_rust_path
}
// The name of the directory rustc expects libraries to be located.
// On Unix should be "lib", on windows "bin"
#[cfg(unix)]
fn find_libdir(sysroot: &Path) -> String {
// FIXME: This is a quick hack to make the rustc binary able to locate
// Rust libraries in Linux environments where libraries might be installed
// to lib64/lib32. This would be more foolproof by basing the sysroot off
// of the directory where librustc is located, rather than where the rustc
// binary is.
//If --libdir is set during configuration to the value other than
// "lib" (i.e. non-default), this value is used (see issue #16552).
match option_env!("CFG_LIBDIR_RELATIVE") {
Some(libdir) if libdir!= "lib" => return libdir.to_string(),
_ => if sysroot.join(primary_libdir_name()).join(rustlibdir()).exists() {
return primary_libdir_name();
} else {
return secondary_libdir_name();
}
}
#[cfg(target_word_size = "64")]
fn primary_libdir_name() -> String {
"lib64".to_string()
}
#[cfg(target_word_size = "32")]
fn primary_libdir_name() -> String {
"lib32".to_string()
}
fn secondary_libdir_name() -> String {
"lib".to_string()
}
}
#[cfg(windows)]
fn find_libdir(_sysroot: &Path) -> String {
"bin".to_string()
}
// The name of rustc's own place to organize libraries.
// Used to be "rustc", now the default is "rustlib"
pub fn rustlibdir() -> String {
"rustlib".to_string()
}
|
{
sysroot.join(&relative_target_lib_path(sysroot, target_triple))
}
|
identifier_body
|
stylesheetlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::StyleSheetListBinding;
use crate::dom::bindings::codegen::Bindings::StyleSheetListBinding::StyleSheetListMethods;
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::document::Document;
use crate::dom::stylesheet::StyleSheet;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct StyleSheetList {
reflector_: Reflector,
document: Dom<Document>,
}
impl StyleSheetList {
#[allow(unrooted_must_root)]
fn new_inherited(doc: Dom<Document>) -> StyleSheetList {
StyleSheetList {
reflector_: Reflector::new(),
document: doc,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, document: Dom<Document>) -> DomRoot<StyleSheetList> {
reflect_dom_object(
Box::new(StyleSheetList::new_inherited(document)),
window,
StyleSheetListBinding::Wrap,
)
}
}
impl StyleSheetListMethods for StyleSheetList {
// https://drafts.csswg.org/cssom/#dom-stylesheetlist-length
fn Length(&self) -> u32
|
// https://drafts.csswg.org/cssom/#dom-stylesheetlist-item
fn Item(&self, index: u32) -> Option<DomRoot<StyleSheet>> {
// XXXManishearth this doesn't handle the origin clean flag and is a
// cors vulnerability
self.document
.stylesheet_at(index as usize)
.map(DomRoot::upcast)
}
// check-tidy: no specs after this line
fn IndexedGetter(&self, index: u32) -> Option<DomRoot<StyleSheet>> {
self.Item(index)
}
}
|
{
self.document.stylesheet_count() as u32
}
|
identifier_body
|
stylesheetlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::StyleSheetListBinding;
use crate::dom::bindings::codegen::Bindings::StyleSheetListBinding::StyleSheetListMethods;
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
|
use crate::dom::stylesheet::StyleSheet;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct StyleSheetList {
reflector_: Reflector,
document: Dom<Document>,
}
impl StyleSheetList {
#[allow(unrooted_must_root)]
fn new_inherited(doc: Dom<Document>) -> StyleSheetList {
StyleSheetList {
reflector_: Reflector::new(),
document: doc,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, document: Dom<Document>) -> DomRoot<StyleSheetList> {
reflect_dom_object(
Box::new(StyleSheetList::new_inherited(document)),
window,
StyleSheetListBinding::Wrap,
)
}
}
impl StyleSheetListMethods for StyleSheetList {
// https://drafts.csswg.org/cssom/#dom-stylesheetlist-length
fn Length(&self) -> u32 {
self.document.stylesheet_count() as u32
}
// https://drafts.csswg.org/cssom/#dom-stylesheetlist-item
fn Item(&self, index: u32) -> Option<DomRoot<StyleSheet>> {
// XXXManishearth this doesn't handle the origin clean flag and is a
// cors vulnerability
self.document
.stylesheet_at(index as usize)
.map(DomRoot::upcast)
}
// check-tidy: no specs after this line
fn IndexedGetter(&self, index: u32) -> Option<DomRoot<StyleSheet>> {
self.Item(index)
}
}
|
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::document::Document;
|
random_line_split
|
stylesheetlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::StyleSheetListBinding;
use crate::dom::bindings::codegen::Bindings::StyleSheetListBinding::StyleSheetListMethods;
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::document::Document;
use crate::dom::stylesheet::StyleSheet;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct StyleSheetList {
reflector_: Reflector,
document: Dom<Document>,
}
impl StyleSheetList {
#[allow(unrooted_must_root)]
fn
|
(doc: Dom<Document>) -> StyleSheetList {
StyleSheetList {
reflector_: Reflector::new(),
document: doc,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, document: Dom<Document>) -> DomRoot<StyleSheetList> {
reflect_dom_object(
Box::new(StyleSheetList::new_inherited(document)),
window,
StyleSheetListBinding::Wrap,
)
}
}
impl StyleSheetListMethods for StyleSheetList {
// https://drafts.csswg.org/cssom/#dom-stylesheetlist-length
fn Length(&self) -> u32 {
self.document.stylesheet_count() as u32
}
// https://drafts.csswg.org/cssom/#dom-stylesheetlist-item
fn Item(&self, index: u32) -> Option<DomRoot<StyleSheet>> {
// XXXManishearth this doesn't handle the origin clean flag and is a
// cors vulnerability
self.document
.stylesheet_at(index as usize)
.map(DomRoot::upcast)
}
// check-tidy: no specs after this line
fn IndexedGetter(&self, index: u32) -> Option<DomRoot<StyleSheet>> {
self.Item(index)
}
}
|
new_inherited
|
identifier_name
|
testcase_units.rs
|
// https://rustbyexample.com/generics/phantom/testcase_units.html
// http://rust-lang-ja.org/rust-by-example/generics/phantom/testcase_units.html
use std::ops::Add;
use std::marker::PhantomData;
/// Create void enumerations to define unit types.
#[derive(Debug, Clone, Copy)]
enum Inch {}
#[derive(Debug, Clone, Copy)]
enum Mm {}
/// `Length` is a type with phantom type parameter `Unit`,
/// and is not generic over the length type (that is `f64`).
///
|
#[derive(Debug, Clone, Copy)]
struct Length<Unit>(f64, PhantomData<Unit>);
/// The `Add` trait defines the behaviour of the `+` operator.
impl<Unit> Add for Length<Unit> {
type Output = Length<Unit>;
// add() returns a new `Length` struct containig the sum.
fn add(self, rhs: Length<Unit>) -> Length<Unit> {
// `+` calls the `Add` implementation for `f64`.
Length(self.0 + rhs.0, PhantomData)
}
}
fn main() {
// Specifies `one_foot` to have phantom type parameter `Inch`.
let one_foot: Length<Inch> = Length(12.0, PhantomData);
// `one_meter` has phantom type parameter `Mm`.
let one_meter: Length<Mm> = Length(1000.0, PhantomData);
// `+` calls the `add()` method we implemented for `Length<Unit>`.
//
// Since `Length` implements `Copy`, `add()` does not consume
// `one_foot` add `one_meter` but copies them into `self` and `rhs`.
let two_feet = one_foot + one_foot;
let two_meters = one_meter + one_meter;
// Addition works.
println!("one foot + one_foot = {:?} in", two_feet.0);
println!("one meter + one_meter = {:?} mm", two_meters.0);
// Nonsensical operations fail as they should:
// Compile-time Error: type mismatch.
//let one_feter = one_foot + one_meter;
// error[E0308]: mismatched types
}
|
/// `f64` already implements the `Clone` and `Copy` traits.
|
random_line_split
|
testcase_units.rs
|
// https://rustbyexample.com/generics/phantom/testcase_units.html
// http://rust-lang-ja.org/rust-by-example/generics/phantom/testcase_units.html
use std::ops::Add;
use std::marker::PhantomData;
/// Create void enumerations to define unit types.
#[derive(Debug, Clone, Copy)]
enum Inch {}
#[derive(Debug, Clone, Copy)]
enum Mm {}
/// `Length` is a type with phantom type parameter `Unit`,
/// and is not generic over the length type (that is `f64`).
///
/// `f64` already implements the `Clone` and `Copy` traits.
#[derive(Debug, Clone, Copy)]
struct Length<Unit>(f64, PhantomData<Unit>);
/// The `Add` trait defines the behaviour of the `+` operator.
impl<Unit> Add for Length<Unit> {
type Output = Length<Unit>;
// add() returns a new `Length` struct containig the sum.
fn add(self, rhs: Length<Unit>) -> Length<Unit> {
// `+` calls the `Add` implementation for `f64`.
Length(self.0 + rhs.0, PhantomData)
}
}
fn main()
|
// error[E0308]: mismatched types
}
|
{
// Specifies `one_foot` to have phantom type parameter `Inch`.
let one_foot: Length<Inch> = Length(12.0, PhantomData);
// `one_meter` has phantom type parameter `Mm`.
let one_meter: Length<Mm> = Length(1000.0, PhantomData);
// `+` calls the `add()` method we implemented for `Length<Unit>`.
//
// Since `Length` implements `Copy`, `add()` does not consume
// `one_foot` add `one_meter` but copies them into `self` and `rhs`.
let two_feet = one_foot + one_foot;
let two_meters = one_meter + one_meter;
// Addition works.
println!("one foot + one_foot = {:?} in", two_feet.0);
println!("one meter + one_meter = {:?} mm", two_meters.0);
// Nonsensical operations fail as they should:
// Compile-time Error: type mismatch.
//let one_feter = one_foot + one_meter;
|
identifier_body
|
testcase_units.rs
|
// https://rustbyexample.com/generics/phantom/testcase_units.html
// http://rust-lang-ja.org/rust-by-example/generics/phantom/testcase_units.html
use std::ops::Add;
use std::marker::PhantomData;
/// Create void enumerations to define unit types.
#[derive(Debug, Clone, Copy)]
enum Inch {}
#[derive(Debug, Clone, Copy)]
enum
|
{}
/// `Length` is a type with phantom type parameter `Unit`,
/// and is not generic over the length type (that is `f64`).
///
/// `f64` already implements the `Clone` and `Copy` traits.
#[derive(Debug, Clone, Copy)]
struct Length<Unit>(f64, PhantomData<Unit>);
/// The `Add` trait defines the behaviour of the `+` operator.
impl<Unit> Add for Length<Unit> {
type Output = Length<Unit>;
// add() returns a new `Length` struct containig the sum.
fn add(self, rhs: Length<Unit>) -> Length<Unit> {
// `+` calls the `Add` implementation for `f64`.
Length(self.0 + rhs.0, PhantomData)
}
}
fn main() {
// Specifies `one_foot` to have phantom type parameter `Inch`.
let one_foot: Length<Inch> = Length(12.0, PhantomData);
// `one_meter` has phantom type parameter `Mm`.
let one_meter: Length<Mm> = Length(1000.0, PhantomData);
// `+` calls the `add()` method we implemented for `Length<Unit>`.
//
// Since `Length` implements `Copy`, `add()` does not consume
// `one_foot` add `one_meter` but copies them into `self` and `rhs`.
let two_feet = one_foot + one_foot;
let two_meters = one_meter + one_meter;
// Addition works.
println!("one foot + one_foot = {:?} in", two_feet.0);
println!("one meter + one_meter = {:?} mm", two_meters.0);
// Nonsensical operations fail as they should:
// Compile-time Error: type mismatch.
//let one_feter = one_foot + one_meter;
// error[E0308]: mismatched types
}
|
Mm
|
identifier_name
|
types.rs
|
// Copyright 2018 Dmitry Tantsur <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Types and traits shared between services.
use super::super::session::Session;
use super::super::Result;
/// Trait representing something that can be refreshed.
pub trait Refresh {
/// Refresh the resource representation.
fn refresh(&mut self) -> Result<()>;
}
/// A type that can be converted into a verified representation.
pub trait IntoVerified {
/// Convert this object into the same object with verification.
fn into_verified(self, _session: &Session) -> Result<Self>
where
Self: Sized,
|
}
macro_rules! opaque_resource_type {
($(#[$attr:meta])* $name:ident? $service:expr) => (
$(#[$attr])*
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct $name {
pub(crate) value: String,
pub(crate) verified: bool
}
impl From<String> for $name {
fn from(value: String) -> $name {
$name {
value,
verified: false
}
}
}
impl<'s> From<&'s str> for $name {
fn from(value: &'s str) -> $name {
$name {
value: String::from(value),
verified: false
}
}
}
impl From<$name> for String {
fn from(value: $name) -> String {
value.value
}
}
impl From<$name> for ::serde_json::Value {
fn from(value: $name) -> ::serde_json::Value {
value.value.into()
}
}
impl AsRef<str> for $name {
fn as_ref(&self) -> &str {
&self.value
}
}
impl ::std::fmt::Display for $name {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
self.value.fmt(f)
}
}
impl ::serde::ser::Serialize for $name {
fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error>
where S: ::serde::ser::Serializer {
serializer.serialize_str(&self.value)
}
}
impl<'de> ::serde::de::Deserialize<'de> for $name {
fn deserialize<D>(deserializer: D)
-> ::std::result::Result<$name, D::Error>
where D: ::serde::de::Deserializer<'de> {
Ok($name {
value: String::deserialize(deserializer)?,
// Assume that values coming from network are valid.
verified: true
})
}
}
impl $name {
/// Create a reference that was previously verified.
#[allow(dead_code)]
pub(crate) fn new_verified(value: String) -> $name {
$name {
value,
verified: true
}
}
}
#[cfg(not(feature = $service))]
#[allow(dead_code)]
impl $crate::common::IntoVerified for $name {}
)
}
opaque_resource_type!(#[doc = "An ID of a `Container`"] ContainerRef? "object-storage");
opaque_resource_type!(#[doc = "An ID of a `Flavor`"] FlavorRef? "compute");
opaque_resource_type!(#[doc = "An ID of an `Image`"] ImageRef? "image");
opaque_resource_type!(#[doc = "An ID of a `KeyPair`"] KeyPairRef? "compute");
opaque_resource_type!(#[doc = "An ID of a `Network`"] NetworkRef? "network");
opaque_resource_type!(#[doc = "An ID of an `Object`"] ObjectRef? "object-storage");
opaque_resource_type!(#[doc = "An ID of a `Project`"] ProjectRef? "identity");
opaque_resource_type!(#[doc = "An ID of a `Port`"] PortRef? "network");
opaque_resource_type!(#[doc = "An ID of a `Router`"] RouterRef? "network");
opaque_resource_type!(#[doc = "An ID of a `SecurityGroup`"] SecurityGroupRef? "network");
opaque_resource_type!(#[doc = "An ID of a `Snapshot`"] SnapshotRef? "volume");
opaque_resource_type!(#[doc = "An ID of a `Subnet`"] SubnetRef? "network");
opaque_resource_type!(#[doc = "An ID of a `User`"] UserRef? "identity");
opaque_resource_type!(#[doc = "An ID of a `Volume`"] VolumeRef? "volume");
#[cfg(test)]
mod test {
use serde_json;
opaque_resource_type!(TestId? "test");
#[test]
fn test_opaque_type_basics() {
let id = TestId::from("foo");
assert_eq!(id.as_ref(), "foo");
assert_eq!(&id.to_string(), "foo");
assert_eq!(id, TestId::from("foo"));
assert!(id!= TestId::from("bar"));
let s: String = id.into();
assert_eq!(&s, "foo");
}
#[test]
fn test_opaque_type_serde() {
let id: TestId = serde_json::from_str("\"foo\"").unwrap();
assert_eq!(id.as_ref(), "foo");
assert_eq!(serde_json::to_string(&id).unwrap(), "\"foo\"");
}
}
|
{
Ok(self)
}
|
identifier_body
|
types.rs
|
// Copyright 2018 Dmitry Tantsur <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Types and traits shared between services.
use super::super::session::Session;
use super::super::Result;
/// Trait representing something that can be refreshed.
pub trait Refresh {
/// Refresh the resource representation.
fn refresh(&mut self) -> Result<()>;
}
/// A type that can be converted into a verified representation.
pub trait IntoVerified {
/// Convert this object into the same object with verification.
fn into_verified(self, _session: &Session) -> Result<Self>
where
Self: Sized,
{
Ok(self)
}
}
macro_rules! opaque_resource_type {
($(#[$attr:meta])* $name:ident? $service:expr) => (
$(#[$attr])*
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct $name {
pub(crate) value: String,
pub(crate) verified: bool
}
impl From<String> for $name {
fn from(value: String) -> $name {
$name {
value,
verified: false
}
}
}
impl<'s> From<&'s str> for $name {
fn from(value: &'s str) -> $name {
$name {
value: String::from(value),
verified: false
}
}
}
impl From<$name> for String {
fn from(value: $name) -> String {
value.value
}
}
impl From<$name> for ::serde_json::Value {
fn from(value: $name) -> ::serde_json::Value {
value.value.into()
}
}
impl AsRef<str> for $name {
fn as_ref(&self) -> &str {
&self.value
}
}
impl ::std::fmt::Display for $name {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
self.value.fmt(f)
}
}
impl ::serde::ser::Serialize for $name {
fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error>
where S: ::serde::ser::Serializer {
serializer.serialize_str(&self.value)
}
}
impl<'de> ::serde::de::Deserialize<'de> for $name {
fn deserialize<D>(deserializer: D)
-> ::std::result::Result<$name, D::Error>
where D: ::serde::de::Deserializer<'de> {
Ok($name {
value: String::deserialize(deserializer)?,
// Assume that values coming from network are valid.
verified: true
})
}
}
impl $name {
/// Create a reference that was previously verified.
#[allow(dead_code)]
pub(crate) fn new_verified(value: String) -> $name {
$name {
value,
verified: true
}
}
}
#[cfg(not(feature = $service))]
#[allow(dead_code)]
impl $crate::common::IntoVerified for $name {}
)
}
opaque_resource_type!(#[doc = "An ID of a `Container`"] ContainerRef? "object-storage");
opaque_resource_type!(#[doc = "An ID of a `Flavor`"] FlavorRef? "compute");
opaque_resource_type!(#[doc = "An ID of an `Image`"] ImageRef? "image");
|
opaque_resource_type!(#[doc = "An ID of a `KeyPair`"] KeyPairRef? "compute");
opaque_resource_type!(#[doc = "An ID of a `Network`"] NetworkRef? "network");
opaque_resource_type!(#[doc = "An ID of an `Object`"] ObjectRef? "object-storage");
opaque_resource_type!(#[doc = "An ID of a `Project`"] ProjectRef? "identity");
opaque_resource_type!(#[doc = "An ID of a `Port`"] PortRef? "network");
opaque_resource_type!(#[doc = "An ID of a `Router`"] RouterRef? "network");
opaque_resource_type!(#[doc = "An ID of a `SecurityGroup`"] SecurityGroupRef? "network");
opaque_resource_type!(#[doc = "An ID of a `Snapshot`"] SnapshotRef? "volume");
opaque_resource_type!(#[doc = "An ID of a `Subnet`"] SubnetRef? "network");
opaque_resource_type!(#[doc = "An ID of a `User`"] UserRef? "identity");
opaque_resource_type!(#[doc = "An ID of a `Volume`"] VolumeRef? "volume");
#[cfg(test)]
mod test {
use serde_json;
opaque_resource_type!(TestId? "test");
#[test]
fn test_opaque_type_basics() {
let id = TestId::from("foo");
assert_eq!(id.as_ref(), "foo");
assert_eq!(&id.to_string(), "foo");
assert_eq!(id, TestId::from("foo"));
assert!(id!= TestId::from("bar"));
let s: String = id.into();
assert_eq!(&s, "foo");
}
#[test]
fn test_opaque_type_serde() {
let id: TestId = serde_json::from_str("\"foo\"").unwrap();
assert_eq!(id.as_ref(), "foo");
assert_eq!(serde_json::to_string(&id).unwrap(), "\"foo\"");
}
}
|
random_line_split
|
|
types.rs
|
// Copyright 2018 Dmitry Tantsur <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Types and traits shared between services.
use super::super::session::Session;
use super::super::Result;
/// Trait representing something that can be refreshed.
pub trait Refresh {
/// Refresh the resource representation.
fn refresh(&mut self) -> Result<()>;
}
/// A type that can be converted into a verified representation.
pub trait IntoVerified {
/// Convert this object into the same object with verification.
fn
|
(self, _session: &Session) -> Result<Self>
where
Self: Sized,
{
Ok(self)
}
}
macro_rules! opaque_resource_type {
($(#[$attr:meta])* $name:ident? $service:expr) => (
$(#[$attr])*
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct $name {
pub(crate) value: String,
pub(crate) verified: bool
}
impl From<String> for $name {
fn from(value: String) -> $name {
$name {
value,
verified: false
}
}
}
impl<'s> From<&'s str> for $name {
fn from(value: &'s str) -> $name {
$name {
value: String::from(value),
verified: false
}
}
}
impl From<$name> for String {
fn from(value: $name) -> String {
value.value
}
}
impl From<$name> for ::serde_json::Value {
fn from(value: $name) -> ::serde_json::Value {
value.value.into()
}
}
impl AsRef<str> for $name {
fn as_ref(&self) -> &str {
&self.value
}
}
impl ::std::fmt::Display for $name {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
self.value.fmt(f)
}
}
impl ::serde::ser::Serialize for $name {
fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error>
where S: ::serde::ser::Serializer {
serializer.serialize_str(&self.value)
}
}
impl<'de> ::serde::de::Deserialize<'de> for $name {
fn deserialize<D>(deserializer: D)
-> ::std::result::Result<$name, D::Error>
where D: ::serde::de::Deserializer<'de> {
Ok($name {
value: String::deserialize(deserializer)?,
// Assume that values coming from network are valid.
verified: true
})
}
}
impl $name {
/// Create a reference that was previously verified.
#[allow(dead_code)]
pub(crate) fn new_verified(value: String) -> $name {
$name {
value,
verified: true
}
}
}
#[cfg(not(feature = $service))]
#[allow(dead_code)]
impl $crate::common::IntoVerified for $name {}
)
}
opaque_resource_type!(#[doc = "An ID of a `Container`"] ContainerRef? "object-storage");
opaque_resource_type!(#[doc = "An ID of a `Flavor`"] FlavorRef? "compute");
opaque_resource_type!(#[doc = "An ID of an `Image`"] ImageRef? "image");
opaque_resource_type!(#[doc = "An ID of a `KeyPair`"] KeyPairRef? "compute");
opaque_resource_type!(#[doc = "An ID of a `Network`"] NetworkRef? "network");
opaque_resource_type!(#[doc = "An ID of an `Object`"] ObjectRef? "object-storage");
opaque_resource_type!(#[doc = "An ID of a `Project`"] ProjectRef? "identity");
opaque_resource_type!(#[doc = "An ID of a `Port`"] PortRef? "network");
opaque_resource_type!(#[doc = "An ID of a `Router`"] RouterRef? "network");
opaque_resource_type!(#[doc = "An ID of a `SecurityGroup`"] SecurityGroupRef? "network");
opaque_resource_type!(#[doc = "An ID of a `Snapshot`"] SnapshotRef? "volume");
opaque_resource_type!(#[doc = "An ID of a `Subnet`"] SubnetRef? "network");
opaque_resource_type!(#[doc = "An ID of a `User`"] UserRef? "identity");
opaque_resource_type!(#[doc = "An ID of a `Volume`"] VolumeRef? "volume");
#[cfg(test)]
mod test {
use serde_json;
opaque_resource_type!(TestId? "test");
#[test]
fn test_opaque_type_basics() {
let id = TestId::from("foo");
assert_eq!(id.as_ref(), "foo");
assert_eq!(&id.to_string(), "foo");
assert_eq!(id, TestId::from("foo"));
assert!(id!= TestId::from("bar"));
let s: String = id.into();
assert_eq!(&s, "foo");
}
#[test]
fn test_opaque_type_serde() {
let id: TestId = serde_json::from_str("\"foo\"").unwrap();
assert_eq!(id.as_ref(), "foo");
assert_eq!(serde_json::to_string(&id).unwrap(), "\"foo\"");
}
}
|
into_verified
|
identifier_name
|
dst-struct-sole.rs
|
// As dst-struct.rs, but the unsized field is the only field in the struct.
struct Fat<T:?Sized> {
ptr: T
}
// x is a fat pointer
fn
|
(x: &Fat<[isize]>) {
let y = &x.ptr;
assert_eq!(x.ptr.len(), 3);
assert_eq!(y[0], 1);
assert_eq!(x.ptr[1], 2);
}
fn foo2<T:ToBar>(x: &Fat<[T]>) {
let y = &x.ptr;
let bar = Bar;
assert_eq!(x.ptr.len(), 3);
assert_eq!(y[0].to_bar(), bar);
assert_eq!(x.ptr[1].to_bar(), bar);
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Bar;
trait ToBar {
fn to_bar(&self) -> Bar;
}
impl ToBar for Bar {
fn to_bar(&self) -> Bar {
*self
}
}
pub fn main() {
// With a vec of ints.
let f1 = Fat { ptr: [1, 2, 3] };
foo(&f1);
let f2 = &f1;
foo(f2);
let f3: &Fat<[isize]> = f2;
foo(f3);
let f4: &Fat<[isize]> = &f1;
foo(f4);
let f5: &Fat<[isize]> = &Fat { ptr: [1, 2, 3] };
foo(f5);
// With a vec of Bars.
let bar = Bar;
let f1 = Fat { ptr: [bar, bar, bar] };
foo2(&f1);
let f2 = &f1;
foo2(f2);
let f3: &Fat<[Bar]> = f2;
foo2(f3);
let f4: &Fat<[Bar]> = &f1;
foo2(f4);
let f5: &Fat<[Bar]> = &Fat { ptr: [bar, bar, bar] };
foo2(f5);
// Assignment.
let f5: &mut Fat<[isize]> = &mut Fat { ptr: [1, 2, 3] };
f5.ptr[1] = 34;
assert_eq!(f5.ptr[0], 1);
assert_eq!(f5.ptr[1], 34);
assert_eq!(f5.ptr[2], 3);
// Zero size vec.
let f5: &Fat<[isize]> = &Fat { ptr: [] };
assert!(f5.ptr.is_empty());
let f5: &Fat<[Bar]> = &Fat { ptr: [] };
assert!(f5.ptr.is_empty());
}
|
foo
|
identifier_name
|
dst-struct-sole.rs
|
// As dst-struct.rs, but the unsized field is the only field in the struct.
struct Fat<T:?Sized> {
ptr: T
}
// x is a fat pointer
fn foo(x: &Fat<[isize]>) {
let y = &x.ptr;
assert_eq!(x.ptr.len(), 3);
assert_eq!(y[0], 1);
assert_eq!(x.ptr[1], 2);
}
fn foo2<T:ToBar>(x: &Fat<[T]>) {
let y = &x.ptr;
let bar = Bar;
assert_eq!(x.ptr.len(), 3);
assert_eq!(y[0].to_bar(), bar);
assert_eq!(x.ptr[1].to_bar(), bar);
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Bar;
trait ToBar {
fn to_bar(&self) -> Bar;
}
impl ToBar for Bar {
fn to_bar(&self) -> Bar {
*self
}
}
pub fn main() {
// With a vec of ints.
let f1 = Fat { ptr: [1, 2, 3] };
|
let f2 = &f1;
foo(f2);
let f3: &Fat<[isize]> = f2;
foo(f3);
let f4: &Fat<[isize]> = &f1;
foo(f4);
let f5: &Fat<[isize]> = &Fat { ptr: [1, 2, 3] };
foo(f5);
// With a vec of Bars.
let bar = Bar;
let f1 = Fat { ptr: [bar, bar, bar] };
foo2(&f1);
let f2 = &f1;
foo2(f2);
let f3: &Fat<[Bar]> = f2;
foo2(f3);
let f4: &Fat<[Bar]> = &f1;
foo2(f4);
let f5: &Fat<[Bar]> = &Fat { ptr: [bar, bar, bar] };
foo2(f5);
// Assignment.
let f5: &mut Fat<[isize]> = &mut Fat { ptr: [1, 2, 3] };
f5.ptr[1] = 34;
assert_eq!(f5.ptr[0], 1);
assert_eq!(f5.ptr[1], 34);
assert_eq!(f5.ptr[2], 3);
// Zero size vec.
let f5: &Fat<[isize]> = &Fat { ptr: [] };
assert!(f5.ptr.is_empty());
let f5: &Fat<[Bar]> = &Fat { ptr: [] };
assert!(f5.ptr.is_empty());
}
|
foo(&f1);
|
random_line_split
|
profiler_unix.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Mutex;
use callgrind::CallgrindClientRequest;
#[derive(Debug, PartialEq)]
enum Profiler {
None,
GPerfTools,
CallGrind,
}
lazy_static::lazy_static! {
#[derive(Debug)]
static ref ACTIVE_PROFILER: Mutex<Profiler> = Mutex::new(Profiler::None);
}
/// Start profiling. Returns false if failed, i.e. there is already a profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
|
// TODO: Better multi-thread support.
#[inline]
pub fn start(name: impl AsRef<str>) -> bool {
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
// Profiling in progress.
if *profiler!= Profiler::None {
return false;
}
if valgrind_request::running_on_valgrind()!= 0 {
*profiler = Profiler::CallGrind;
CallgrindClientRequest::start();
} else {
*profiler = Profiler::GPerfTools;
gperftools::PROFILER
.lock()
.unwrap()
.start(name.as_ref())
.unwrap();
}
true
}
/// Stop profiling. Returns false if failed, i.e. there is no profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
/// zero cost.
#[inline]
pub fn stop() -> bool {
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
match *profiler {
Profiler::None => false,
Profiler::CallGrind => {
CallgrindClientRequest::stop(None);
*profiler = Profiler::None;
true
}
Profiler::GPerfTools => {
gperftools::PROFILER.lock().unwrap().stop().unwrap();
*profiler = Profiler::None;
true
}
}
}
|
/// zero cost.
///
/// When running in Callgrind, Callgrind instrumentation will be started
/// (`CALLGRIND_START_INSTRUMENTATION`). Otherwise, the CPU Profiler will be started and profile
/// will be generated to the file specified by `name`.
|
random_line_split
|
profiler_unix.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Mutex;
use callgrind::CallgrindClientRequest;
#[derive(Debug, PartialEq)]
enum Profiler {
None,
GPerfTools,
CallGrind,
}
lazy_static::lazy_static! {
#[derive(Debug)]
static ref ACTIVE_PROFILER: Mutex<Profiler> = Mutex::new(Profiler::None);
}
/// Start profiling. Returns false if failed, i.e. there is already a profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
/// zero cost.
///
/// When running in Callgrind, Callgrind instrumentation will be started
/// (`CALLGRIND_START_INSTRUMENTATION`). Otherwise, the CPU Profiler will be started and profile
/// will be generated to the file specified by `name`.
// TODO: Better multi-thread support.
#[inline]
pub fn start(name: impl AsRef<str>) -> bool {
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
// Profiling in progress.
if *profiler!= Profiler::None {
return false;
}
if valgrind_request::running_on_valgrind()!= 0 {
*profiler = Profiler::CallGrind;
CallgrindClientRequest::start();
} else
|
true
}
/// Stop profiling. Returns false if failed, i.e. there is no profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
/// zero cost.
#[inline]
pub fn stop() -> bool {
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
match *profiler {
Profiler::None => false,
Profiler::CallGrind => {
CallgrindClientRequest::stop(None);
*profiler = Profiler::None;
true
}
Profiler::GPerfTools => {
gperftools::PROFILER.lock().unwrap().stop().unwrap();
*profiler = Profiler::None;
true
}
}
}
|
{
*profiler = Profiler::GPerfTools;
gperftools::PROFILER
.lock()
.unwrap()
.start(name.as_ref())
.unwrap();
}
|
conditional_block
|
profiler_unix.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Mutex;
use callgrind::CallgrindClientRequest;
#[derive(Debug, PartialEq)]
enum
|
{
None,
GPerfTools,
CallGrind,
}
lazy_static::lazy_static! {
#[derive(Debug)]
static ref ACTIVE_PROFILER: Mutex<Profiler> = Mutex::new(Profiler::None);
}
/// Start profiling. Returns false if failed, i.e. there is already a profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
/// zero cost.
///
/// When running in Callgrind, Callgrind instrumentation will be started
/// (`CALLGRIND_START_INSTRUMENTATION`). Otherwise, the CPU Profiler will be started and profile
/// will be generated to the file specified by `name`.
// TODO: Better multi-thread support.
#[inline]
pub fn start(name: impl AsRef<str>) -> bool {
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
// Profiling in progress.
if *profiler!= Profiler::None {
return false;
}
if valgrind_request::running_on_valgrind()!= 0 {
*profiler = Profiler::CallGrind;
CallgrindClientRequest::start();
} else {
*profiler = Profiler::GPerfTools;
gperftools::PROFILER
.lock()
.unwrap()
.start(name.as_ref())
.unwrap();
}
true
}
/// Stop profiling. Returns false if failed, i.e. there is no profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
/// zero cost.
#[inline]
pub fn stop() -> bool {
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
match *profiler {
Profiler::None => false,
Profiler::CallGrind => {
CallgrindClientRequest::stop(None);
*profiler = Profiler::None;
true
}
Profiler::GPerfTools => {
gperftools::PROFILER.lock().unwrap().stop().unwrap();
*profiler = Profiler::None;
true
}
}
}
|
Profiler
|
identifier_name
|
profiler_unix.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Mutex;
use callgrind::CallgrindClientRequest;
#[derive(Debug, PartialEq)]
enum Profiler {
None,
GPerfTools,
CallGrind,
}
lazy_static::lazy_static! {
#[derive(Debug)]
static ref ACTIVE_PROFILER: Mutex<Profiler> = Mutex::new(Profiler::None);
}
/// Start profiling. Returns false if failed, i.e. there is already a profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
/// zero cost.
///
/// When running in Callgrind, Callgrind instrumentation will be started
/// (`CALLGRIND_START_INSTRUMENTATION`). Otherwise, the CPU Profiler will be started and profile
/// will be generated to the file specified by `name`.
// TODO: Better multi-thread support.
#[inline]
pub fn start(name: impl AsRef<str>) -> bool {
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
// Profiling in progress.
if *profiler!= Profiler::None {
return false;
}
if valgrind_request::running_on_valgrind()!= 0 {
*profiler = Profiler::CallGrind;
CallgrindClientRequest::start();
} else {
*profiler = Profiler::GPerfTools;
gperftools::PROFILER
.lock()
.unwrap()
.start(name.as_ref())
.unwrap();
}
true
}
/// Stop profiling. Returns false if failed, i.e. there is no profiling in progress.
///
/// When `profiling` feature is not enabled, this function will do nothing and there is totally
/// zero cost.
#[inline]
pub fn stop() -> bool
|
{
let mut profiler = ACTIVE_PROFILER.lock().unwrap();
match *profiler {
Profiler::None => false,
Profiler::CallGrind => {
CallgrindClientRequest::stop(None);
*profiler = Profiler::None;
true
}
Profiler::GPerfTools => {
gperftools::PROFILER.lock().unwrap().stop().unwrap();
*profiler = Profiler::None;
true
}
}
}
|
identifier_body
|
|
print_table.rs
|
use itertools::*;
use std::cmp::max;
use std::fmt::{self, Debug, Formatter};
pub fn debug_table<A, B, C, D, E, F, G>(name: A,
column_names: B,
column_alignments: D,
rows: E)
-> Box<Debug>
where A: Into<String>,
B: IntoIterator<Item = C>,
C: Into<String>,
D: IntoIterator<Item = Alignment>,
E: IntoIterator<Item = F>,
F: IntoIterator<Item = G>,
G: Into<String>
{
let name = name.into();
let col_names = column_names.into_iter().map(Into::into).collect_vec();
let col_align = column_alignments.into_iter().collect_vec();
assert_eq!(col_names.len(), col_align.len());
let mut col_widths = col_names.iter().map(String::len).collect_vec();
let rows = rows.into_iter().map(|r| r.into_iter().map(Into::into).collect_vec()).collect_vec();
for row in rows.iter() {
assert_eq!(col_widths.len(), row.len());
for (i, x) in row.iter().enumerate() {
col_widths[i] = max(col_widths[i], x.len());
}
}
let header = format!("| {} |",
col_names.into_iter()
.enumerate()
.map(|(i, s)| format!("{:^1$}", s, col_widths[i]))
.join(" | "));
let sep = header.chars()
.map(|c| {
match c {
'|' => "+",
_ => "-",
}
})
.join("");
Box::new(TablePrinter {
name: name,
header: header,
sep: sep,
col_widths: col_widths,
col_align: col_align,
rows: rows,
})
}
pub enum Alignment {
Left,
Right,
Center,
}
struct TablePrinter {
name: String,
header: String,
sep: String,
col_widths: Vec<usize>,
col_align: Vec<Alignment>,
rows: Vec<Vec<String>>,
}
impl TablePrinter {
fn fmt_row(&self, row: &[String]) -> String {
format!("| {} |",
row.iter()
.enumerate()
.map(|(i, s)| {
match self.col_align[i] {
Alignment::Left => format!("{:<1$}", s, self.col_widths[i]),
Alignment::Center => format!("{:^1$}", s, self.col_widths[i]),
Alignment::Right => format!("{:>1$}", s, self.col_widths[i]),
}
})
.join(" | "))
}
}
impl Debug for TablePrinter {
fn fmt(&self, f: &mut Formatter) -> fmt::Result
|
}
|
{
try!(writeln!(f, "{}:", self.name));
try!(writeln!(f, "{}", self.sep));
try!(writeln!(f, "{}", self.header));
try!(writeln!(f, "{}", self.sep));
for row in self.rows.iter() {
try!(writeln!(f, "{}", self.fmt_row(&*row)));
}
writeln!(f, "{}", self.sep)
}
|
identifier_body
|
print_table.rs
|
use itertools::*;
use std::cmp::max;
use std::fmt::{self, Debug, Formatter};
pub fn debug_table<A, B, C, D, E, F, G>(name: A,
column_names: B,
column_alignments: D,
rows: E)
-> Box<Debug>
where A: Into<String>,
B: IntoIterator<Item = C>,
C: Into<String>,
D: IntoIterator<Item = Alignment>,
E: IntoIterator<Item = F>,
F: IntoIterator<Item = G>,
G: Into<String>
{
let name = name.into();
let col_names = column_names.into_iter().map(Into::into).collect_vec();
let col_align = column_alignments.into_iter().collect_vec();
assert_eq!(col_names.len(), col_align.len());
let mut col_widths = col_names.iter().map(String::len).collect_vec();
let rows = rows.into_iter().map(|r| r.into_iter().map(Into::into).collect_vec()).collect_vec();
for row in rows.iter() {
assert_eq!(col_widths.len(), row.len());
for (i, x) in row.iter().enumerate() {
col_widths[i] = max(col_widths[i], x.len());
}
}
let header = format!("| {} |",
col_names.into_iter()
.enumerate()
.map(|(i, s)| format!("{:^1$}", s, col_widths[i]))
.join(" | "));
let sep = header.chars()
.map(|c| {
match c {
'|' => "+",
_ => "-",
}
})
.join("");
Box::new(TablePrinter {
name: name,
header: header,
sep: sep,
col_widths: col_widths,
col_align: col_align,
rows: rows,
})
}
pub enum Alignment {
Left,
Right,
Center,
}
struct TablePrinter {
name: String,
header: String,
sep: String,
col_widths: Vec<usize>,
col_align: Vec<Alignment>,
rows: Vec<Vec<String>>,
}
impl TablePrinter {
fn fmt_row(&self, row: &[String]) -> String {
format!("| {} |",
row.iter()
.enumerate()
.map(|(i, s)| {
match self.col_align[i] {
Alignment::Left => format!("{:<1$}", s, self.col_widths[i]),
Alignment::Center => format!("{:^1$}", s, self.col_widths[i]),
Alignment::Right => format!("{:>1$}", s, self.col_widths[i]),
}
})
.join(" | "))
}
}
impl Debug for TablePrinter {
fn
|
(&self, f: &mut Formatter) -> fmt::Result {
try!(writeln!(f, "{}:", self.name));
try!(writeln!(f, "{}", self.sep));
try!(writeln!(f, "{}", self.header));
try!(writeln!(f, "{}", self.sep));
for row in self.rows.iter() {
try!(writeln!(f, "{}", self.fmt_row(&*row)));
}
writeln!(f, "{}", self.sep)
}
}
|
fmt
|
identifier_name
|
print_table.rs
|
use itertools::*;
use std::cmp::max;
use std::fmt::{self, Debug, Formatter};
pub fn debug_table<A, B, C, D, E, F, G>(name: A,
column_names: B,
column_alignments: D,
rows: E)
-> Box<Debug>
where A: Into<String>,
B: IntoIterator<Item = C>,
C: Into<String>,
D: IntoIterator<Item = Alignment>,
E: IntoIterator<Item = F>,
F: IntoIterator<Item = G>,
G: Into<String>
{
let name = name.into();
let col_names = column_names.into_iter().map(Into::into).collect_vec();
let col_align = column_alignments.into_iter().collect_vec();
assert_eq!(col_names.len(), col_align.len());
let mut col_widths = col_names.iter().map(String::len).collect_vec();
let rows = rows.into_iter().map(|r| r.into_iter().map(Into::into).collect_vec()).collect_vec();
for row in rows.iter() {
assert_eq!(col_widths.len(), row.len());
for (i, x) in row.iter().enumerate() {
col_widths[i] = max(col_widths[i], x.len());
}
}
let header = format!("| {} |",
col_names.into_iter()
.enumerate()
.map(|(i, s)| format!("{:^1$}", s, col_widths[i]))
.join(" | "));
let sep = header.chars()
.map(|c| {
match c {
'|' => "+",
|
Box::new(TablePrinter {
name: name,
header: header,
sep: sep,
col_widths: col_widths,
col_align: col_align,
rows: rows,
})
}
pub enum Alignment {
Left,
Right,
Center,
}
struct TablePrinter {
name: String,
header: String,
sep: String,
col_widths: Vec<usize>,
col_align: Vec<Alignment>,
rows: Vec<Vec<String>>,
}
impl TablePrinter {
fn fmt_row(&self, row: &[String]) -> String {
format!("| {} |",
row.iter()
.enumerate()
.map(|(i, s)| {
match self.col_align[i] {
Alignment::Left => format!("{:<1$}", s, self.col_widths[i]),
Alignment::Center => format!("{:^1$}", s, self.col_widths[i]),
Alignment::Right => format!("{:>1$}", s, self.col_widths[i]),
}
})
.join(" | "))
}
}
impl Debug for TablePrinter {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
try!(writeln!(f, "{}:", self.name));
try!(writeln!(f, "{}", self.sep));
try!(writeln!(f, "{}", self.header));
try!(writeln!(f, "{}", self.sep));
for row in self.rows.iter() {
try!(writeln!(f, "{}", self.fmt_row(&*row)));
}
writeln!(f, "{}", self.sep)
}
}
|
_ => "-",
}
})
.join("");
|
random_line_split
|
opcode.rs
|
use value::Value;
#[derive(Debug, Clone)]
pub enum OpCode {
Val(Value), // stack.push(Value)
Add, // stack.pop() + stack.pop()
Sub, // stack.pop() - stack.pop()
Mul, // stack.pop() * stack.pop()
Div, // stack.pop() / stack.pop()
EqEq, // stack.pop() == stack.pop()
NotEq, // stack.pop()!= stack.pop()
|
GtEq, // >=
Def, // scopes[stack.pop()] = stack.pop()
Ret,
Call, // stack.pop()(...)
JumpIfNot(i32), // if!stack.pop() -> pc += jump
JumpIf(i32), // if stack.pop() -> pc += jump
Jump(i32), // pc += jump
GetName(String), // stack.push(scopes[String])
}
|
Lt, // <
LtEq, // <=
Gt, // >
|
random_line_split
|
opcode.rs
|
use value::Value;
#[derive(Debug, Clone)]
pub enum
|
{
Val(Value), // stack.push(Value)
Add, // stack.pop() + stack.pop()
Sub, // stack.pop() - stack.pop()
Mul, // stack.pop() * stack.pop()
Div, // stack.pop() / stack.pop()
EqEq, // stack.pop() == stack.pop()
NotEq, // stack.pop()!= stack.pop()
Lt, // <
LtEq, // <=
Gt, // >
GtEq, // >=
Def, // scopes[stack.pop()] = stack.pop()
Ret,
Call, // stack.pop()(...)
JumpIfNot(i32), // if!stack.pop() -> pc += jump
JumpIf(i32), // if stack.pop() -> pc += jump
Jump(i32), // pc += jump
GetName(String), // stack.push(scopes[String])
}
|
OpCode
|
identifier_name
|
varuint.rs
|
use std::mem;
use varuint::{Deserializable, Serializable, Varint, VarintBaseType};
fn test_varuint<T: VarintBaseType>(v: T, size: usize)
where
Varint<T>: Serializable + Deserializable,
{
let v = Varint::<T>(v);
assert_eq!(size, v.size_hint());
let mut arr: [u8; 17] = unsafe { mem::uninitialized() };
assert_eq!(size, v.serialize(&mut (&mut arr as &mut [u8])).unwrap());
assert_eq!(v, Varint::deserialize(&mut (&arr as &[u8])).unwrap());
}
#[test]
fn test_all() {
test_varuint(0u8, 1);
test_varuint(240u8, 1);
test_varuint(0u16, 1);
test_varuint(240u16, 1);
test_varuint(0u32, 1);
test_varuint(240u32, 1);
test_varuint(0u64, 1);
test_varuint(240u64, 1);
test_varuint(0u128, 1);
test_varuint(240u128, 1);
test_varuint(241u8, 2);
test_varuint(2031u16, 2);
test_varuint(241u16, 2);
test_varuint(2031u32, 2);
test_varuint(241u32, 2);
test_varuint(2031u64, 2);
test_varuint(241u64, 2);
test_varuint(2031u128, 2);
test_varuint(241u128, 2);
test_varuint(2032u16, 3);
test_varuint(67567u32, 3);
test_varuint(2032u32, 3);
test_varuint(67567u64, 3);
test_varuint(2032u64, 3);
|
test_varuint(67567u128, 3);
test_varuint(2032u128, 3);
test_varuint(67568u32, 4);
test_varuint(16777215u32, 4);
test_varuint(67568u64, 4);
test_varuint(16777215u64, 4);
test_varuint(67568u128, 4);
test_varuint(16777215u128, 4);
test_varuint(16777216u32, 5);
test_varuint(4294967295u32, 5);
test_varuint(16777216u64, 5);
test_varuint(4294967295u64, 5);
test_varuint(16777216u128, 5);
test_varuint(4294967295u128, 5);
test_varuint(4294967296u64, 6);
test_varuint(1099511627775u128, 6);
test_varuint(4294967296u128, 6);
test_varuint(1099511627776u128, 7);
test_varuint(281474976710655u128, 7);
test_varuint(281474976710656u128, 8);
test_varuint(72057594037927935u128, 8);
test_varuint(72057594037927936u128, 9);
test_varuint(u128::from(u64::max_value()), 9);
test_varuint(u128::from(u64::max_value()) + 1, 17);
test_varuint(u128::max_value(), 17);
}
|
random_line_split
|
|
varuint.rs
|
use std::mem;
use varuint::{Deserializable, Serializable, Varint, VarintBaseType};
fn test_varuint<T: VarintBaseType>(v: T, size: usize)
where
Varint<T>: Serializable + Deserializable,
{
let v = Varint::<T>(v);
assert_eq!(size, v.size_hint());
let mut arr: [u8; 17] = unsafe { mem::uninitialized() };
assert_eq!(size, v.serialize(&mut (&mut arr as &mut [u8])).unwrap());
assert_eq!(v, Varint::deserialize(&mut (&arr as &[u8])).unwrap());
}
#[test]
fn test_all()
|
test_varuint(241u128, 2);
test_varuint(2032u16, 3);
test_varuint(67567u32, 3);
test_varuint(2032u32, 3);
test_varuint(67567u64, 3);
test_varuint(2032u64, 3);
test_varuint(67567u128, 3);
test_varuint(2032u128, 3);
test_varuint(67568u32, 4);
test_varuint(16777215u32, 4);
test_varuint(67568u64, 4);
test_varuint(16777215u64, 4);
test_varuint(67568u128, 4);
test_varuint(16777215u128, 4);
test_varuint(16777216u32, 5);
test_varuint(4294967295u32, 5);
test_varuint(16777216u64, 5);
test_varuint(4294967295u64, 5);
test_varuint(16777216u128, 5);
test_varuint(4294967295u128, 5);
test_varuint(4294967296u64, 6);
test_varuint(1099511627775u128, 6);
test_varuint(4294967296u128, 6);
test_varuint(1099511627776u128, 7);
test_varuint(281474976710655u128, 7);
test_varuint(281474976710656u128, 8);
test_varuint(72057594037927935u128, 8);
test_varuint(72057594037927936u128, 9);
test_varuint(u128::from(u64::max_value()), 9);
test_varuint(u128::from(u64::max_value()) + 1, 17);
test_varuint(u128::max_value(), 17);
}
|
{
test_varuint(0u8, 1);
test_varuint(240u8, 1);
test_varuint(0u16, 1);
test_varuint(240u16, 1);
test_varuint(0u32, 1);
test_varuint(240u32, 1);
test_varuint(0u64, 1);
test_varuint(240u64, 1);
test_varuint(0u128, 1);
test_varuint(240u128, 1);
test_varuint(241u8, 2);
test_varuint(2031u16, 2);
test_varuint(241u16, 2);
test_varuint(2031u32, 2);
test_varuint(241u32, 2);
test_varuint(2031u64, 2);
test_varuint(241u64, 2);
test_varuint(2031u128, 2);
|
identifier_body
|
varuint.rs
|
use std::mem;
use varuint::{Deserializable, Serializable, Varint, VarintBaseType};
fn test_varuint<T: VarintBaseType>(v: T, size: usize)
where
Varint<T>: Serializable + Deserializable,
{
let v = Varint::<T>(v);
assert_eq!(size, v.size_hint());
let mut arr: [u8; 17] = unsafe { mem::uninitialized() };
assert_eq!(size, v.serialize(&mut (&mut arr as &mut [u8])).unwrap());
assert_eq!(v, Varint::deserialize(&mut (&arr as &[u8])).unwrap());
}
#[test]
fn
|
() {
test_varuint(0u8, 1);
test_varuint(240u8, 1);
test_varuint(0u16, 1);
test_varuint(240u16, 1);
test_varuint(0u32, 1);
test_varuint(240u32, 1);
test_varuint(0u64, 1);
test_varuint(240u64, 1);
test_varuint(0u128, 1);
test_varuint(240u128, 1);
test_varuint(241u8, 2);
test_varuint(2031u16, 2);
test_varuint(241u16, 2);
test_varuint(2031u32, 2);
test_varuint(241u32, 2);
test_varuint(2031u64, 2);
test_varuint(241u64, 2);
test_varuint(2031u128, 2);
test_varuint(241u128, 2);
test_varuint(2032u16, 3);
test_varuint(67567u32, 3);
test_varuint(2032u32, 3);
test_varuint(67567u64, 3);
test_varuint(2032u64, 3);
test_varuint(67567u128, 3);
test_varuint(2032u128, 3);
test_varuint(67568u32, 4);
test_varuint(16777215u32, 4);
test_varuint(67568u64, 4);
test_varuint(16777215u64, 4);
test_varuint(67568u128, 4);
test_varuint(16777215u128, 4);
test_varuint(16777216u32, 5);
test_varuint(4294967295u32, 5);
test_varuint(16777216u64, 5);
test_varuint(4294967295u64, 5);
test_varuint(16777216u128, 5);
test_varuint(4294967295u128, 5);
test_varuint(4294967296u64, 6);
test_varuint(1099511627775u128, 6);
test_varuint(4294967296u128, 6);
test_varuint(1099511627776u128, 7);
test_varuint(281474976710655u128, 7);
test_varuint(281474976710656u128, 8);
test_varuint(72057594037927935u128, 8);
test_varuint(72057594037927936u128, 9);
test_varuint(u128::from(u64::max_value()), 9);
test_varuint(u128::from(u64::max_value()) + 1, 17);
test_varuint(u128::max_value(), 17);
}
|
test_all
|
identifier_name
|
add_template.rs
|
use std::io::{self, Write};
use std::{fs, path};
use toml;
use filesystem;
use common;
use error;
pub fn add_template(config_path: &str,
file_path: &str,
new_name: Option<&str>,
templating_enabled: bool)
-> Result<(), error::DotfilerError> {
let mut config = common::load_config(config_path)?;
let templates_path = common::get_templates_path(config_path)?;
let tar_path = match new_name {
Some(name) => name,
None => &file_path[file_path.rfind('/').unwrap() + 1..],
};
let tar_path = templates_path.join(tar_path);
let tar_path = tar_path.to_string_lossy().to_string();
if let Some(ref mut dotfiles) = config.dotfiles {
if let Some(duplicate_index) =
template_exists_already(dotfiles,
&templates_path.to_string_lossy(),
&tar_path,
file_path)? {
println!("The template exists already. Do you want to update or overwrite it? [y/N]");
let mut buf = String::new();
io::stdin().read_line(&mut buf)?;
if buf.to_lowercase().trim()!= "y" {
println!("The file has not been added.");
return Ok(());
} else {
{
let duplicate = dotfiles.get(duplicate_index).unwrap();
let duplicate_template_path = common::resolve_path(&duplicate.template, None)?;
let error = match fs::metadata(&duplicate_template_path) {
Ok(metadata) => {
if metadata.is_dir() {
fs::remove_dir_all(&duplicate_template_path).err()
} else {
fs::remove_file(&duplicate_template_path).err()
}
}
Err(e) => Some(e),
};
if let Some(e) = error {
let msg = format!("Unable to remove the duplicate file: {}", e);
return Err(error::DotfilerError::Message(msg));
}
}
dotfiles.swap_remove(duplicate_index);
}
}
} else {
config.dotfiles = Some(Vec::new());
}
// Back up old config to cache
if let Err(e) = fs::copy(&config_path, "./cache/config.toml") {
let msg = format!("Unable to save current config to backup cache:\n{}", e);
return Err(error::DotfilerError::Message(msg));
}
// Create all required target directories before root
let _ = path::Path::new(&tar_path)
.parent()
.map(|p| fs::create_dir_all(&p));
let mut root = match filesystem::create_tree_from_path(file_path, &tar_path) {
Ok(root) => root,
Err(e) => {
let msg = format!("Can't create tree for file '{}':\n{}", file_path, e);
return Err(error::DotfilerError::Message(msg));
}
};
if templating_enabled {
if let Some(ref vars) = config.variables {
if let Err(e) = root.template(vars) {
let msg = format!("Unable to add the file '{}':\n{}", file_path, e);
return Err(error::DotfilerError::Message(msg));
}
}
}
if let Err(e) = root.save() {
let mut msg = format!("Unable to add the file '{}':\n{}", file_path, e);
if let Err(e) = root.restore() {
msg = format!("Critical Error! Unable to recover from failure.\n{}", e);
}
return Err(error::DotfilerError::Message(msg));
}
// Add new file to config
let dotfile = common::Dotfile {
template: tar_path.clone(),
target: file_path.to_string(),
};
if let Some(ref mut dotfiles) = config.dotfiles {
dotfiles.push(dotfile);
}
// Save new config
let new_config = toml::to_string(&config)?;
if let Err(e) = fs::File::create(common::resolve_path(config_path, None)?)
.and_then(|mut f| f.write_all(new_config.as_bytes())) {
let mut msg = format!("Unable to save new config:\n{}", e);
if let Err(e) = fs::copy("./cache/config.toml", &config_path) {
msg = format!("Unable to restore old config after failure:\n{}", e);
}
return Err(error::DotfilerError::Message(msg));
}
println!("Successfully added '{}' to dotfiles.", file_path);
Ok(())
}
fn template_exists_already(dotfiles: &[common::Dotfile],
templates_path: &str,
template_path: &str,
tar_path: &str)
-> Result<Option<usize>, error::DotfilerError>
|
{
for (i, dotfile) in dotfiles.iter().enumerate() {
let existing_template_path = common::resolve_path(&dotfile.template, Some(templates_path))?;
let existing_tar_path = common::resolve_path(&dotfile.target, Some(templates_path))?;
if existing_template_path == template_path || existing_tar_path == tar_path {
return Ok(Some(i));
}
}
Ok(None)
}
|
identifier_body
|
|
add_template.rs
|
use std::io::{self, Write};
use std::{fs, path};
use toml;
use filesystem;
use common;
use error;
pub fn add_template(config_path: &str,
file_path: &str,
new_name: Option<&str>,
templating_enabled: bool)
-> Result<(), error::DotfilerError> {
let mut config = common::load_config(config_path)?;
let templates_path = common::get_templates_path(config_path)?;
let tar_path = match new_name {
Some(name) => name,
None => &file_path[file_path.rfind('/').unwrap() + 1..],
};
let tar_path = templates_path.join(tar_path);
let tar_path = tar_path.to_string_lossy().to_string();
if let Some(ref mut dotfiles) = config.dotfiles {
if let Some(duplicate_index) =
template_exists_already(dotfiles,
&templates_path.to_string_lossy(),
&tar_path,
file_path)? {
println!("The template exists already. Do you want to update or overwrite it? [y/N]");
let mut buf = String::new();
io::stdin().read_line(&mut buf)?;
if buf.to_lowercase().trim()!= "y" {
println!("The file has not been added.");
return Ok(());
} else {
{
let duplicate = dotfiles.get(duplicate_index).unwrap();
let duplicate_template_path = common::resolve_path(&duplicate.template, None)?;
let error = match fs::metadata(&duplicate_template_path) {
Ok(metadata) => {
if metadata.is_dir() {
fs::remove_dir_all(&duplicate_template_path).err()
} else {
fs::remove_file(&duplicate_template_path).err()
}
}
Err(e) => Some(e),
};
if let Some(e) = error {
let msg = format!("Unable to remove the duplicate file: {}", e);
return Err(error::DotfilerError::Message(msg));
}
}
dotfiles.swap_remove(duplicate_index);
}
}
} else {
config.dotfiles = Some(Vec::new());
}
// Back up old config to cache
if let Err(e) = fs::copy(&config_path, "./cache/config.toml") {
let msg = format!("Unable to save current config to backup cache:\n{}", e);
return Err(error::DotfilerError::Message(msg));
}
// Create all required target directories before root
let _ = path::Path::new(&tar_path)
.parent()
|
.map(|p| fs::create_dir_all(&p));
let mut root = match filesystem::create_tree_from_path(file_path, &tar_path) {
Ok(root) => root,
Err(e) => {
let msg = format!("Can't create tree for file '{}':\n{}", file_path, e);
return Err(error::DotfilerError::Message(msg));
}
};
if templating_enabled {
if let Some(ref vars) = config.variables {
if let Err(e) = root.template(vars) {
let msg = format!("Unable to add the file '{}':\n{}", file_path, e);
return Err(error::DotfilerError::Message(msg));
}
}
}
if let Err(e) = root.save() {
let mut msg = format!("Unable to add the file '{}':\n{}", file_path, e);
if let Err(e) = root.restore() {
msg = format!("Critical Error! Unable to recover from failure.\n{}", e);
}
return Err(error::DotfilerError::Message(msg));
}
// Add new file to config
let dotfile = common::Dotfile {
template: tar_path.clone(),
target: file_path.to_string(),
};
if let Some(ref mut dotfiles) = config.dotfiles {
dotfiles.push(dotfile);
}
// Save new config
let new_config = toml::to_string(&config)?;
if let Err(e) = fs::File::create(common::resolve_path(config_path, None)?)
.and_then(|mut f| f.write_all(new_config.as_bytes())) {
let mut msg = format!("Unable to save new config:\n{}", e);
if let Err(e) = fs::copy("./cache/config.toml", &config_path) {
msg = format!("Unable to restore old config after failure:\n{}", e);
}
return Err(error::DotfilerError::Message(msg));
}
println!("Successfully added '{}' to dotfiles.", file_path);
Ok(())
}
fn template_exists_already(dotfiles: &[common::Dotfile],
templates_path: &str,
template_path: &str,
tar_path: &str)
-> Result<Option<usize>, error::DotfilerError> {
for (i, dotfile) in dotfiles.iter().enumerate() {
let existing_template_path = common::resolve_path(&dotfile.template, Some(templates_path))?;
let existing_tar_path = common::resolve_path(&dotfile.target, Some(templates_path))?;
if existing_template_path == template_path || existing_tar_path == tar_path {
return Ok(Some(i));
}
}
Ok(None)
}
|
random_line_split
|
|
add_template.rs
|
use std::io::{self, Write};
use std::{fs, path};
use toml;
use filesystem;
use common;
use error;
pub fn
|
(config_path: &str,
file_path: &str,
new_name: Option<&str>,
templating_enabled: bool)
-> Result<(), error::DotfilerError> {
let mut config = common::load_config(config_path)?;
let templates_path = common::get_templates_path(config_path)?;
let tar_path = match new_name {
Some(name) => name,
None => &file_path[file_path.rfind('/').unwrap() + 1..],
};
let tar_path = templates_path.join(tar_path);
let tar_path = tar_path.to_string_lossy().to_string();
if let Some(ref mut dotfiles) = config.dotfiles {
if let Some(duplicate_index) =
template_exists_already(dotfiles,
&templates_path.to_string_lossy(),
&tar_path,
file_path)? {
println!("The template exists already. Do you want to update or overwrite it? [y/N]");
let mut buf = String::new();
io::stdin().read_line(&mut buf)?;
if buf.to_lowercase().trim()!= "y" {
println!("The file has not been added.");
return Ok(());
} else {
{
let duplicate = dotfiles.get(duplicate_index).unwrap();
let duplicate_template_path = common::resolve_path(&duplicate.template, None)?;
let error = match fs::metadata(&duplicate_template_path) {
Ok(metadata) => {
if metadata.is_dir() {
fs::remove_dir_all(&duplicate_template_path).err()
} else {
fs::remove_file(&duplicate_template_path).err()
}
}
Err(e) => Some(e),
};
if let Some(e) = error {
let msg = format!("Unable to remove the duplicate file: {}", e);
return Err(error::DotfilerError::Message(msg));
}
}
dotfiles.swap_remove(duplicate_index);
}
}
} else {
config.dotfiles = Some(Vec::new());
}
// Back up old config to cache
if let Err(e) = fs::copy(&config_path, "./cache/config.toml") {
let msg = format!("Unable to save current config to backup cache:\n{}", e);
return Err(error::DotfilerError::Message(msg));
}
// Create all required target directories before root
let _ = path::Path::new(&tar_path)
.parent()
.map(|p| fs::create_dir_all(&p));
let mut root = match filesystem::create_tree_from_path(file_path, &tar_path) {
Ok(root) => root,
Err(e) => {
let msg = format!("Can't create tree for file '{}':\n{}", file_path, e);
return Err(error::DotfilerError::Message(msg));
}
};
if templating_enabled {
if let Some(ref vars) = config.variables {
if let Err(e) = root.template(vars) {
let msg = format!("Unable to add the file '{}':\n{}", file_path, e);
return Err(error::DotfilerError::Message(msg));
}
}
}
if let Err(e) = root.save() {
let mut msg = format!("Unable to add the file '{}':\n{}", file_path, e);
if let Err(e) = root.restore() {
msg = format!("Critical Error! Unable to recover from failure.\n{}", e);
}
return Err(error::DotfilerError::Message(msg));
}
// Add new file to config
let dotfile = common::Dotfile {
template: tar_path.clone(),
target: file_path.to_string(),
};
if let Some(ref mut dotfiles) = config.dotfiles {
dotfiles.push(dotfile);
}
// Save new config
let new_config = toml::to_string(&config)?;
if let Err(e) = fs::File::create(common::resolve_path(config_path, None)?)
.and_then(|mut f| f.write_all(new_config.as_bytes())) {
let mut msg = format!("Unable to save new config:\n{}", e);
if let Err(e) = fs::copy("./cache/config.toml", &config_path) {
msg = format!("Unable to restore old config after failure:\n{}", e);
}
return Err(error::DotfilerError::Message(msg));
}
println!("Successfully added '{}' to dotfiles.", file_path);
Ok(())
}
fn template_exists_already(dotfiles: &[common::Dotfile],
templates_path: &str,
template_path: &str,
tar_path: &str)
-> Result<Option<usize>, error::DotfilerError> {
for (i, dotfile) in dotfiles.iter().enumerate() {
let existing_template_path = common::resolve_path(&dotfile.template, Some(templates_path))?;
let existing_tar_path = common::resolve_path(&dotfile.target, Some(templates_path))?;
if existing_template_path == template_path || existing_tar_path == tar_path {
return Ok(Some(i));
}
}
Ok(None)
}
|
add_template
|
identifier_name
|
mutated_accounts_tests.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::compiler::{as_module, compile_units};
use move_core_types::{
account_address::AccountAddress,
gas_schedule::{GasAlgebra, GasUnits},
identifier::Identifier,
language_storage::ModuleId,
value::{serialize_values, MoveValue},
};
use move_vm_runtime::{logging::NoContextLog, move_vm::MoveVM};
use move_vm_test_utils::InMemoryStorage;
use move_vm_types::gas_schedule::{zero_cost_schedule, CostStrategy};
const TEST_ADDR: AccountAddress = AccountAddress::new([42; AccountAddress::LENGTH]);
#[test]
fn mutated_accounts() {
let code = r#"
module M {
resource struct Foo { a: bool }
public fun get(addr: address): bool acquires Foo {
borrow_global<Foo>(addr).a
}
public fun flip(addr: address) acquires Foo {
let f_ref = borrow_global_mut<Foo>(addr);
f_ref.a =!f_ref.a;
}
public fun publish(addr: &signer) {
move_to(addr, Foo { a: true} )
}
}
"#;
let mut units = compile_units(TEST_ADDR, &code).unwrap();
let m = as_module(units.pop().unwrap());
let mut blob = vec![];
m.serialize(&mut blob).unwrap();
let mut storage = InMemoryStorage::new();
let module_id = ModuleId::new(TEST_ADDR, Identifier::new("M").unwrap());
storage.publish_or_overwrite_module(module_id.clone(), blob);
let vm = MoveVM::new();
let mut sess = vm.new_session(&storage);
let cost_table = zero_cost_schedule();
let mut cost_strategy = CostStrategy::system(&cost_table, GasUnits::new(0));
let context = NoContextLog::new();
let publish = Identifier::new("publish").unwrap();
let flip = Identifier::new("flip").unwrap();
let get = Identifier::new("get").unwrap();
let account1 = AccountAddress::random();
sess.execute_function(
&module_id,
&publish,
vec![],
serialize_values(&vec![MoveValue::Signer(account1)]),
&mut cost_strategy,
&context,
)
.unwrap();
// The resource was published to "account1" and the sender's account
// (TEST_ADDR) is assumed to be mutated as well (e.g., in a subsequent
// transaction epilogue).
assert_eq!(sess.num_mutated_accounts(&TEST_ADDR), 2);
sess.execute_function(
&module_id,
&get,
vec![],
serialize_values(&vec![MoveValue::Address(account1)]),
&mut cost_strategy,
&context,
)
.unwrap();
assert_eq!(sess.num_mutated_accounts(&TEST_ADDR), 2);
sess.execute_function(
&module_id,
&flip,
vec![],
serialize_values(&vec![MoveValue::Address(account1)]),
&mut cost_strategy,
&context,
)
.unwrap();
assert_eq!(sess.num_mutated_accounts(&TEST_ADDR), 2);
let (changes, _) = sess.finish().unwrap();
storage.apply(changes).unwrap();
let mut sess = vm.new_session(&storage);
|
sess.execute_function(
&module_id,
&get,
vec![],
serialize_values(&vec![MoveValue::Address(account1)]),
&mut cost_strategy,
&context,
)
.unwrap();
// Only the sender's account (TEST_ADDR) should have been modified.
assert_eq!(sess.num_mutated_accounts(&TEST_ADDR), 1);
}
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.