file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
main.rs | extern crate serenity;
use serenity::prelude::*;
use serenity::model::*;
use std::env;
// Serenity implements transparent sharding in a way that you do not need to
// manually handle separate processes or connections manually.
//
// Transparent sharding is useful for a shared cache. Instead of having caches
// with duplicated data, a shared cache means all your data can be easily
// accessible across all shards.
//
// If your bot is on many guilds - or over the maximum of 2500 - then you
// should/must use guild sharding.
//
// This is an example file showing how guild sharding works. For this to
// properly be able to be seen in effect, your bot should be in at least 2
// guilds.
//
// Taking a scenario of 2 guilds, try saying "!ping" in one guild. It should
// print either "0" or "1" in the console. Saying "!ping" in the other guild,
// it should cache the other number in the console. This confirms that guild
// sharding works.
struct Handler;
impl EventHandler for Handler {
fn on_message(&self, ctx: Context, msg: Message) {
if msg.content == "!ping" {
// The current shard needs to be unlocked so it can be read from, as
// multiple threads may otherwise attempt to read from or mutate it
// concurrently.
{
let shard = ctx.shard.lock();
let shard_info = shard.shard_info();
println!("Shard {}", shard_info[0]);
}
if let Err(why) = msg.channel_id.say("Pong!") {
println!("Error sending message: {:?}", why);
}
}
}
fn on_ready(&self, _: Context, ready: Ready) |
}
fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN")
.expect("Expected a token in the environment");
let mut client = Client::new(&token, Handler);
// The total number of shards to use. The "current shard number" of a
// shard - that is, the shard it is assigned to - is indexed at 0,
// while the total shard count is indexed at 1.
//
// This means if you have 5 shards, your total shard count will be 5, while
// each shard will be assigned numbers 0 through 4.
if let Err(why) = client.start_shards(2) {
println!("Client error: {:?}", why);
}
}
| {
println!("{} is connected!", ready.user.name);
} | identifier_body |
main.rs | extern crate serenity;
use serenity::prelude::*;
use serenity::model::*;
use std::env;
// Serenity implements transparent sharding in a way that you do not need to
// manually handle separate processes or connections manually.
//
// Transparent sharding is useful for a shared cache. Instead of having caches
// with duplicated data, a shared cache means all your data can be easily
// accessible across all shards.
//
// If your bot is on many guilds - or over the maximum of 2500 - then you
// should/must use guild sharding.
//
// This is an example file showing how guild sharding works. For this to
// properly be able to be seen in effect, your bot should be in at least 2
// guilds.
//
// Taking a scenario of 2 guilds, try saying "!ping" in one guild. It should
// print either "0" or "1" in the console. Saying "!ping" in the other guild,
// it should cache the other number in the console. This confirms that guild
// sharding works.
struct Handler;
impl EventHandler for Handler {
fn on_message(&self, ctx: Context, msg: Message) {
if msg.content == "!ping" {
// The current shard needs to be unlocked so it can be read from, as
// multiple threads may otherwise attempt to read from or mutate it
// concurrently.
{
let shard = ctx.shard.lock();
let shard_info = shard.shard_info();
println!("Shard {}", shard_info[0]);
}
if let Err(why) = msg.channel_id.say("Pong!") {
println!("Error sending message: {:?}", why);
}
}
}
fn on_ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN")
.expect("Expected a token in the environment");
let mut client = Client::new(&token, Handler);
// The total number of shards to use. The "current shard number" of a
// shard - that is, the shard it is assigned to - is indexed at 0,
// while the total shard count is indexed at 1.
//
// This means if you have 5 shards, your total shard count will be 5, while
// each shard will be assigned numbers 0 through 4.
if let Err(why) = client.start_shards(2) |
}
| {
println!("Client error: {:?}", why);
} | conditional_block |
main.rs | extern crate serenity;
use serenity::prelude::*;
use serenity::model::*;
use std::env;
// Serenity implements transparent sharding in a way that you do not need to
// manually handle separate processes or connections manually.
//
// Transparent sharding is useful for a shared cache. Instead of having caches
// with duplicated data, a shared cache means all your data can be easily
// accessible across all shards.
//
// If your bot is on many guilds - or over the maximum of 2500 - then you
// should/must use guild sharding.
//
// This is an example file showing how guild sharding works. For this to
// properly be able to be seen in effect, your bot should be in at least 2
// guilds.
//
// Taking a scenario of 2 guilds, try saying "!ping" in one guild. It should
// print either "0" or "1" in the console. Saying "!ping" in the other guild,
// it should cache the other number in the console. This confirms that guild
// sharding works.
struct Handler;
impl EventHandler for Handler { | if msg.content == "!ping" {
// The current shard needs to be unlocked so it can be read from, as
// multiple threads may otherwise attempt to read from or mutate it
// concurrently.
{
let shard = ctx.shard.lock();
let shard_info = shard.shard_info();
println!("Shard {}", shard_info[0]);
}
if let Err(why) = msg.channel_id.say("Pong!") {
println!("Error sending message: {:?}", why);
}
}
}
fn on_ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN")
.expect("Expected a token in the environment");
let mut client = Client::new(&token, Handler);
// The total number of shards to use. The "current shard number" of a
// shard - that is, the shard it is assigned to - is indexed at 0,
// while the total shard count is indexed at 1.
//
// This means if you have 5 shards, your total shard count will be 5, while
// each shard will be assigned numbers 0 through 4.
if let Err(why) = client.start_shards(2) {
println!("Client error: {:?}", why);
}
} | fn on_message(&self, ctx: Context, msg: Message) { | random_line_split |
std-smallintmap.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern mod extra;
use extra::smallintmap::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>) {
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
}
fn | () {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000", ~"100"]
} else if args.len() <= 1u {
~[~"", ~"10000", ~"50"]
} else {
args
};
let max = from_str::<uint>(args[1]).unwrap();
let rep = from_str::<uint>(args[2]).unwrap();
let mut checkf = 0.0;
let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = extra::time::precise_time_s();
append_sequential(0u, max, &mut map);
let mid = extra::time::precise_time_s();
check_sequential(0u, max, &map);
let end = extra::time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
}
| main | identifier_name |
std-smallintmap.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern mod extra;
use extra::smallintmap::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>) {
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) |
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000", ~"100"]
} else if args.len() <= 1u {
~[~"", ~"10000", ~"50"]
} else {
args
};
let max = from_str::<uint>(args[1]).unwrap();
let rep = from_str::<uint>(args[2]).unwrap();
let mut checkf = 0.0;
let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = extra::time::precise_time_s();
append_sequential(0u, max, &mut map);
let mid = extra::time::precise_time_s();
check_sequential(0u, max, &map);
let end = extra::time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
}
| {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
} | identifier_body |
std-smallintmap.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern mod extra;
use extra::smallintmap::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>) {
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000", ~"100"]
} else if args.len() <= 1u {
~[~"", ~"10000", ~"50"]
} else | ;
let max = from_str::<uint>(args[1]).unwrap();
let rep = from_str::<uint>(args[2]).unwrap();
let mut checkf = 0.0;
let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = extra::time::precise_time_s();
append_sequential(0u, max, &mut map);
let mid = extra::time::precise_time_s();
check_sequential(0u, max, &map);
let end = extra::time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
}
| {
args
} | conditional_block |
std-smallintmap.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern mod extra;
use extra::smallintmap::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>) {
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000", ~"100"]
} else if args.len() <= 1u {
~[~"", ~"10000", ~"50"]
} else {
args
};
let max = from_str::<uint>(args[1]).unwrap();
let rep = from_str::<uint>(args[2]).unwrap();
let mut checkf = 0.0; | append_sequential(0u, max, &mut map);
let mid = extra::time::precise_time_s();
check_sequential(0u, max, &map);
let end = extra::time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
} | let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = extra::time::precise_time_s(); | random_line_split |
wallet.rs | use indy::IndyError;
use indy::wallet;
use indy::future::Future;
pub const DEFAULT_WALLET_CREDENTIALS: &'static str = r#"{"key":"8dvfYSt5d1taSd6yJdpjq4emkwsPDDLYxkNFysFD2cZY", "key_derivation_method":"RAW"}"#;
pub fn create_wallet(config: &str) -> Result<(), IndyError> {
wallet::create_wallet(config, DEFAULT_WALLET_CREDENTIALS).wait()
}
pub fn open_wallet(config: &str) -> Result<i32, IndyError> {
wallet::open_wallet(config, DEFAULT_WALLET_CREDENTIALS).wait()
}
pub fn create_and_open_wallet() -> Result<i32, IndyError> {
let wallet_name = format!("default-wallet-name-{}", super::sequence::get_next_id());
let config = format!(r#"{{"id":"{}"}}"#, wallet_name);
create_wallet(&config)?;
open_wallet(&config)
}
pub fn close_wallet(wallet_handle: i32) -> Result<(), IndyError> | {
wallet::close_wallet(wallet_handle).wait()
} | identifier_body |
|
wallet.rs | use indy::IndyError;
use indy::wallet; |
pub const DEFAULT_WALLET_CREDENTIALS: &'static str = r#"{"key":"8dvfYSt5d1taSd6yJdpjq4emkwsPDDLYxkNFysFD2cZY", "key_derivation_method":"RAW"}"#;
pub fn create_wallet(config: &str) -> Result<(), IndyError> {
wallet::create_wallet(config, DEFAULT_WALLET_CREDENTIALS).wait()
}
pub fn open_wallet(config: &str) -> Result<i32, IndyError> {
wallet::open_wallet(config, DEFAULT_WALLET_CREDENTIALS).wait()
}
pub fn create_and_open_wallet() -> Result<i32, IndyError> {
let wallet_name = format!("default-wallet-name-{}", super::sequence::get_next_id());
let config = format!(r#"{{"id":"{}"}}"#, wallet_name);
create_wallet(&config)?;
open_wallet(&config)
}
pub fn close_wallet(wallet_handle: i32) -> Result<(), IndyError> {
wallet::close_wallet(wallet_handle).wait()
} | use indy::future::Future; | random_line_split |
wallet.rs | use indy::IndyError;
use indy::wallet;
use indy::future::Future;
pub const DEFAULT_WALLET_CREDENTIALS: &'static str = r#"{"key":"8dvfYSt5d1taSd6yJdpjq4emkwsPDDLYxkNFysFD2cZY", "key_derivation_method":"RAW"}"#;
pub fn create_wallet(config: &str) -> Result<(), IndyError> {
wallet::create_wallet(config, DEFAULT_WALLET_CREDENTIALS).wait()
}
pub fn open_wallet(config: &str) -> Result<i32, IndyError> {
wallet::open_wallet(config, DEFAULT_WALLET_CREDENTIALS).wait()
}
pub fn | () -> Result<i32, IndyError> {
let wallet_name = format!("default-wallet-name-{}", super::sequence::get_next_id());
let config = format!(r#"{{"id":"{}"}}"#, wallet_name);
create_wallet(&config)?;
open_wallet(&config)
}
pub fn close_wallet(wallet_handle: i32) -> Result<(), IndyError> {
wallet::close_wallet(wallet_handle).wait()
} | create_and_open_wallet | identifier_name |
architecture.rs | /*
* Panopticon - A libre disassembler
* Copyright (C) 2015, 2017 Panopticon authors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use panopticon_core::{Architecture, Match, Region, Result};
#[derive(Clone,Debug)]
pub enum Amd64 {}
#[derive(Clone,PartialEq,Copy,Debug)]
pub enum Mode {
Real, // Real mode / Virtual 8086 mode
Protected, // Protected mode / Long compatibility mode
Long, // Long 64-bit mode
}
impl Mode {
pub fn | (&self) -> usize {
match self {
&Mode::Real => 32,
&Mode::Protected => 16,
&Mode::Long => 16,
}
}
pub fn bits(&self) -> usize {
match self {
&Mode::Real => 16,
&Mode::Protected => 32,
&Mode::Long => 64,
}
}
}
impl Architecture for Amd64 {
type Token = u8;
type Configuration = Mode;
fn prepare(_: &Region, _: &Self::Configuration) -> Result<Vec<(&'static str, u64, &'static str)>> {
Ok(vec![])
}
fn decode(reg: &Region, start: u64, cfg: &Self::Configuration) -> Result<Match<Self>> {
let data = reg.iter();
let mut buf: Vec<u8> = vec![];
let mut i = data.seek(start);
let p = start;
while let Some(Some(b)) = i.next() {
buf.push(b);
if buf.len() == 15 {
break;
}
}
debug!("disass @ {:#x}: {:?}", p, buf);
let ret = crate::disassembler::read(*cfg, &buf, p).and_then(
|(len, mne, mut jmp)| {
Ok(
Match::<Amd64> {
tokens: buf[0..len as usize].to_vec(),
mnemonics: vec![mne],
jumps: jmp.drain(..).map(|x| (p, x.0, x.1)).collect::<Vec<_>>(),
configuration: cfg.clone(),
}
)
}
);
debug!(" res: {:?}", ret);
ret
}
}
| alt_bits | identifier_name |
architecture.rs | /*
* Panopticon - A libre disassembler
* Copyright (C) 2015, 2017 Panopticon authors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use panopticon_core::{Architecture, Match, Region, Result};
#[derive(Clone,Debug)]
pub enum Amd64 {}
#[derive(Clone,PartialEq,Copy,Debug)]
pub enum Mode {
Real, // Real mode / Virtual 8086 mode
Protected, // Protected mode / Long compatibility mode
Long, // Long 64-bit mode
}
impl Mode {
pub fn alt_bits(&self) -> usize {
match self {
&Mode::Real => 32,
&Mode::Protected => 16,
&Mode::Long => 16,
}
}
pub fn bits(&self) -> usize {
match self {
&Mode::Real => 16,
&Mode::Protected => 32,
&Mode::Long => 64,
}
}
}
impl Architecture for Amd64 {
type Token = u8;
type Configuration = Mode;
fn prepare(_: &Region, _: &Self::Configuration) -> Result<Vec<(&'static str, u64, &'static str)>> {
Ok(vec![])
}
fn decode(reg: &Region, start: u64, cfg: &Self::Configuration) -> Result<Match<Self>> {
let data = reg.iter();
let mut buf: Vec<u8> = vec![];
let mut i = data.seek(start);
let p = start;
while let Some(Some(b)) = i.next() {
buf.push(b);
if buf.len() == 15 {
break;
} |
let ret = crate::disassembler::read(*cfg, &buf, p).and_then(
|(len, mne, mut jmp)| {
Ok(
Match::<Amd64> {
tokens: buf[0..len as usize].to_vec(),
mnemonics: vec![mne],
jumps: jmp.drain(..).map(|x| (p, x.0, x.1)).collect::<Vec<_>>(),
configuration: cfg.clone(),
}
)
}
);
debug!(" res: {:?}", ret);
ret
}
} | }
debug!("disass @ {:#x}: {:?}", p, buf); | random_line_split |
uievent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::{GlobalRef, Window};
use dom::bindings::js::{JS, JSRef, RootedReference, Temporary, OptionalSettable};
use dom::bindings::trace::Traceable;
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::event::{Event, EventTypeId, UIEventTypeId};
use dom::window::Window;
use servo_util::str::DOMString;
use std::cell::Cell;
#[deriving(Encodable)]
#[must_root]
pub struct UIEvent {
pub event: Event,
view: Cell<Option<JS<Window>>>,
detail: Traceable<Cell<i32>>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
self.type_id == UIEventTypeId
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent {
UIEvent {
event: Event::new_inherited(type_id),
view: Cell::new(None),
detail: Traceable::new(Cell::new(0)),
}
}
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(UIEventTypeId),
&Window(window),
UIEventBinding::Wrap)
} | can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.deref().InitUIEvent(type_, can_bubble, cancelable, view, detail);
Temporary::from_rooted(*ev)
}
pub fn Constructor(global: &GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let event = UIEvent::new(global.as_window(), type_,
init.parent.bubbles, init.parent.cancelable,
init.view.root_ref(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get().map(|view| Temporary::new(view))
}
fn Detail(self) -> i32 {
self.detail.deref().get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
let event: JSRef<Event> = EventCast::from_ref(self);
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.deref().set(detail);
}
}
impl Reflectable for UIEvent {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.event.reflector()
}
} |
pub fn new(window: JSRef<Window>,
type_: DOMString, | random_line_split |
uievent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::{GlobalRef, Window};
use dom::bindings::js::{JS, JSRef, RootedReference, Temporary, OptionalSettable};
use dom::bindings::trace::Traceable;
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::event::{Event, EventTypeId, UIEventTypeId};
use dom::window::Window;
use servo_util::str::DOMString;
use std::cell::Cell;
#[deriving(Encodable)]
#[must_root]
pub struct | {
pub event: Event,
view: Cell<Option<JS<Window>>>,
detail: Traceable<Cell<i32>>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
self.type_id == UIEventTypeId
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent {
UIEvent {
event: Event::new_inherited(type_id),
view: Cell::new(None),
detail: Traceable::new(Cell::new(0)),
}
}
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(UIEventTypeId),
&Window(window),
UIEventBinding::Wrap)
}
pub fn new(window: JSRef<Window>,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.deref().InitUIEvent(type_, can_bubble, cancelable, view, detail);
Temporary::from_rooted(*ev)
}
pub fn Constructor(global: &GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let event = UIEvent::new(global.as_window(), type_,
init.parent.bubbles, init.parent.cancelable,
init.view.root_ref(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get().map(|view| Temporary::new(view))
}
fn Detail(self) -> i32 {
self.detail.deref().get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
let event: JSRef<Event> = EventCast::from_ref(self);
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.deref().set(detail);
}
}
impl Reflectable for UIEvent {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.event.reflector()
}
}
| UIEvent | identifier_name |
uievent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::{GlobalRef, Window};
use dom::bindings::js::{JS, JSRef, RootedReference, Temporary, OptionalSettable};
use dom::bindings::trace::Traceable;
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::event::{Event, EventTypeId, UIEventTypeId};
use dom::window::Window;
use servo_util::str::DOMString;
use std::cell::Cell;
#[deriving(Encodable)]
#[must_root]
pub struct UIEvent {
pub event: Event,
view: Cell<Option<JS<Window>>>,
detail: Traceable<Cell<i32>>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
self.type_id == UIEventTypeId
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent |
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(UIEventTypeId),
&Window(window),
UIEventBinding::Wrap)
}
pub fn new(window: JSRef<Window>,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.deref().InitUIEvent(type_, can_bubble, cancelable, view, detail);
Temporary::from_rooted(*ev)
}
pub fn Constructor(global: &GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let event = UIEvent::new(global.as_window(), type_,
init.parent.bubbles, init.parent.cancelable,
init.view.root_ref(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get().map(|view| Temporary::new(view))
}
fn Detail(self) -> i32 {
self.detail.deref().get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
let event: JSRef<Event> = EventCast::from_ref(self);
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.deref().set(detail);
}
}
impl Reflectable for UIEvent {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.event.reflector()
}
}
| {
UIEvent {
event: Event::new_inherited(type_id),
view: Cell::new(None),
detail: Traceable::new(Cell::new(0)),
}
} | identifier_body |
regions-steal-closure.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![feature(unboxed_closures)]
struct closure_box<'a> {
cl: Box<FnMut() + 'a>,
} |
fn main() {
let cl_box = {
let mut i = 3i;
box_it(box || i += 1) //~ ERROR cannot infer
};
cl_box.cl.call_mut(());
} |
fn box_it<'r>(x: Box<FnMut() + 'r>) -> closure_box<'r> {
closure_box {cl: x}
} | random_line_split |
regions-steal-closure.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![feature(unboxed_closures)]
struct | <'a> {
cl: Box<FnMut() + 'a>,
}
fn box_it<'r>(x: Box<FnMut() + 'r>) -> closure_box<'r> {
closure_box {cl: x}
}
fn main() {
let cl_box = {
let mut i = 3i;
box_it(box || i += 1) //~ ERROR cannot infer
};
cl_box.cl.call_mut(());
}
| closure_box | identifier_name |
regions-steal-closure.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![feature(unboxed_closures)]
struct closure_box<'a> {
cl: Box<FnMut() + 'a>,
}
fn box_it<'r>(x: Box<FnMut() + 'r>) -> closure_box<'r> {
closure_box {cl: x}
}
fn main() | {
let cl_box = {
let mut i = 3i;
box_it(box || i += 1) //~ ERROR cannot infer
};
cl_box.cl.call_mut(());
} | identifier_body |
|
options.rs | // The MIT License (MIT)
// Copyright (c) 2015 Y. T. Chung <[email protected]>
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! Coroutine options
use std::rt;
use std::default::Default;
/// Coroutine options
pub struct Options {
pub stack_size: usize,
pub name: Option<String>,
}
impl Options {
pub fn | () -> Options {
Options {
stack_size: rt::min_stack(),
name: None,
}
}
pub fn stack_size(mut self, size: usize) -> Options {
self.stack_size = size;
self
}
pub fn name(mut self, name: Option<String>) -> Options {
self.name = name;
self
}
}
impl Default for Options {
fn default() -> Options {
Options::new()
}
}
| new | identifier_name |
options.rs | // The MIT License (MIT)
// Copyright (c) 2015 Y. T. Chung <[email protected]>
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! Coroutine options
use std::rt;
use std::default::Default;
| pub name: Option<String>,
}
impl Options {
pub fn new() -> Options {
Options {
stack_size: rt::min_stack(),
name: None,
}
}
pub fn stack_size(mut self, size: usize) -> Options {
self.stack_size = size;
self
}
pub fn name(mut self, name: Option<String>) -> Options {
self.name = name;
self
}
}
impl Default for Options {
fn default() -> Options {
Options::new()
}
} | /// Coroutine options
pub struct Options {
pub stack_size: usize, | random_line_split |
map_clone.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::is_trait_method;
use clippy_utils::remove_blocks;
use clippy_utils::source::snippet_with_applicability;
use clippy_utils::ty::{is_copy, is_type_diagnostic_item};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::Mutability;
use rustc_middle::ty;
use rustc_middle::ty::adjustment::Adjust;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of `map(|x| x.clone())` or
/// dereferencing closures for `Copy` types, on `Iterator` or `Option`,
/// and suggests `cloned()` or `copied()` instead
///
/// ### Why is this bad?
/// Readability, this can be written more concisely
///
/// ### Example
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.map(|i| *i);
/// ```
///
/// The correct use would be:
///
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.cloned();
/// ```
pub MAP_CLONE,
style,
"using `iterator.map(|x| x.clone())`, or dereferencing closures for `Copy` types"
}
declare_lint_pass!(MapClone => [MAP_CLONE]);
impl<'tcx> LateLintPass<'tcx> for MapClone {
fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
if e.span.from_expansion() {
return;
}
if_chain! {
if let hir::ExprKind::MethodCall(method, _, args, _) = e.kind;
if args.len() == 2;
if method.ident.name == sym::map;
let ty = cx.typeck_results().expr_ty(&args[0]);
if is_type_diagnostic_item(cx, ty, sym::Option) || is_trait_method(cx, e, sym::Iterator);
if let hir::ExprKind::Closure(_, _, body_id, _, _) = args[1].kind;
then {
let closure_body = cx.tcx.hir().body(body_id);
let closure_expr = remove_blocks(&closure_body.value);
match closure_body.params[0].pat.kind {
hir::PatKind::Ref(inner, hir::Mutability::Not) => if let hir::PatKind::Binding(
hir::BindingAnnotation::Unannotated,.., name, None
) = inner.kind {
if ident_eq(name, closure_expr) {
lint(cx, e.span, args[0].span, true);
}
},
hir::PatKind::Binding(hir::BindingAnnotation::Unannotated,.., name, None) => {
match closure_expr.kind {
hir::ExprKind::Unary(hir::UnOp::Deref, inner) => {
if ident_eq(name, inner) {
if let ty::Ref(.., Mutability::Not) = cx.typeck_results().expr_ty(inner).kind() {
lint(cx, e.span, args[0].span, true);
}
}
},
hir::ExprKind::MethodCall(method, _, [obj], _) => if_chain! {
if ident_eq(name, obj) && method.ident.name == sym::clone;
if let Some(fn_id) = cx.typeck_results().type_dependent_def_id(closure_expr.hir_id);
if let Some(trait_id) = cx.tcx.trait_of_item(fn_id);
if cx.tcx.lang_items().clone_trait().map_or(false, |id| id == trait_id);
// no autoderefs
if!cx.typeck_results().expr_adjustments(obj).iter()
.any(|a| matches!(a.kind, Adjust::Deref(Some(..))));
then {
let obj_ty = cx.typeck_results().expr_ty(obj);
if let ty::Ref(_, ty, mutability) = obj_ty.kind() {
if matches!(mutability, Mutability::Not) {
let copy = is_copy(cx, ty);
lint(cx, e.span, args[0].span, copy);
}
} else {
lint_needless_cloning(cx, e.span, args[0].span);
}
}
},
_ => {},
}
},
_ => {},
}
}
}
}
}
fn ident_eq(name: Ident, path: &hir::Expr<'_>) -> bool {
if let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = path.kind {
path.segments.len() == 1 && path.segments[0].ident == name
} else {
false
}
}
fn lint_needless_cloning(cx: &LateContext<'_>, root: Span, receiver: Span) {
span_lint_and_sugg(
cx,
MAP_CLONE,
root.trim_start(receiver).unwrap(),
"you are needlessly cloning iterator elements",
"remove the `map` call",
String::new(),
Applicability::MachineApplicable,
);
}
fn lint(cx: &LateContext<'_>, replace: Span, root: Span, copied: bool) {
let mut applicability = Applicability::MachineApplicable;
if copied {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace,
"you are using an explicit closure for copying elements",
"consider calling the dedicated `copied` method",
format!(
"{}.copied()",
snippet_with_applicability(cx, root, "..", &mut applicability)
),
applicability,
);
} else {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace,
"you are using an explicit closure for cloning elements",
"consider calling the dedicated `cloned` method",
format!(
"{}.cloned()",
snippet_with_applicability(cx, root, "..", &mut applicability)
), | applicability,
);
}
} | random_line_split |
|
map_clone.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::is_trait_method;
use clippy_utils::remove_blocks;
use clippy_utils::source::snippet_with_applicability;
use clippy_utils::ty::{is_copy, is_type_diagnostic_item};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::Mutability;
use rustc_middle::ty;
use rustc_middle::ty::adjustment::Adjust;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of `map(|x| x.clone())` or
/// dereferencing closures for `Copy` types, on `Iterator` or `Option`,
/// and suggests `cloned()` or `copied()` instead
///
/// ### Why is this bad?
/// Readability, this can be written more concisely
///
/// ### Example
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.map(|i| *i);
/// ```
///
/// The correct use would be:
///
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.cloned();
/// ```
pub MAP_CLONE,
style,
"using `iterator.map(|x| x.clone())`, or dereferencing closures for `Copy` types"
}
declare_lint_pass!(MapClone => [MAP_CLONE]);
impl<'tcx> LateLintPass<'tcx> for MapClone {
fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
if e.span.from_expansion() {
return;
}
if_chain! {
if let hir::ExprKind::MethodCall(method, _, args, _) = e.kind;
if args.len() == 2;
if method.ident.name == sym::map;
let ty = cx.typeck_results().expr_ty(&args[0]);
if is_type_diagnostic_item(cx, ty, sym::Option) || is_trait_method(cx, e, sym::Iterator);
if let hir::ExprKind::Closure(_, _, body_id, _, _) = args[1].kind;
then {
let closure_body = cx.tcx.hir().body(body_id);
let closure_expr = remove_blocks(&closure_body.value);
match closure_body.params[0].pat.kind {
hir::PatKind::Ref(inner, hir::Mutability::Not) => if let hir::PatKind::Binding(
hir::BindingAnnotation::Unannotated,.., name, None
) = inner.kind {
if ident_eq(name, closure_expr) {
lint(cx, e.span, args[0].span, true);
}
},
hir::PatKind::Binding(hir::BindingAnnotation::Unannotated,.., name, None) => {
match closure_expr.kind {
hir::ExprKind::Unary(hir::UnOp::Deref, inner) => {
if ident_eq(name, inner) {
if let ty::Ref(.., Mutability::Not) = cx.typeck_results().expr_ty(inner).kind() {
lint(cx, e.span, args[0].span, true);
}
}
},
hir::ExprKind::MethodCall(method, _, [obj], _) => if_chain! {
if ident_eq(name, obj) && method.ident.name == sym::clone;
if let Some(fn_id) = cx.typeck_results().type_dependent_def_id(closure_expr.hir_id);
if let Some(trait_id) = cx.tcx.trait_of_item(fn_id);
if cx.tcx.lang_items().clone_trait().map_or(false, |id| id == trait_id);
// no autoderefs
if!cx.typeck_results().expr_adjustments(obj).iter()
.any(|a| matches!(a.kind, Adjust::Deref(Some(..))));
then {
let obj_ty = cx.typeck_results().expr_ty(obj);
if let ty::Ref(_, ty, mutability) = obj_ty.kind() {
if matches!(mutability, Mutability::Not) {
let copy = is_copy(cx, ty);
lint(cx, e.span, args[0].span, copy);
}
} else {
lint_needless_cloning(cx, e.span, args[0].span);
}
}
},
_ => {},
}
},
_ => {},
}
}
}
}
}
fn ident_eq(name: Ident, path: &hir::Expr<'_>) -> bool {
if let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = path.kind {
path.segments.len() == 1 && path.segments[0].ident == name
} else {
false
}
}
fn lint_needless_cloning(cx: &LateContext<'_>, root: Span, receiver: Span) {
span_lint_and_sugg(
cx,
MAP_CLONE,
root.trim_start(receiver).unwrap(),
"you are needlessly cloning iterator elements",
"remove the `map` call",
String::new(),
Applicability::MachineApplicable,
);
}
fn | (cx: &LateContext<'_>, replace: Span, root: Span, copied: bool) {
let mut applicability = Applicability::MachineApplicable;
if copied {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace,
"you are using an explicit closure for copying elements",
"consider calling the dedicated `copied` method",
format!(
"{}.copied()",
snippet_with_applicability(cx, root, "..", &mut applicability)
),
applicability,
);
} else {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace,
"you are using an explicit closure for cloning elements",
"consider calling the dedicated `cloned` method",
format!(
"{}.cloned()",
snippet_with_applicability(cx, root, "..", &mut applicability)
),
applicability,
);
}
}
| lint | identifier_name |
map_clone.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::is_trait_method;
use clippy_utils::remove_blocks;
use clippy_utils::source::snippet_with_applicability;
use clippy_utils::ty::{is_copy, is_type_diagnostic_item};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::Mutability;
use rustc_middle::ty;
use rustc_middle::ty::adjustment::Adjust;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of `map(|x| x.clone())` or
/// dereferencing closures for `Copy` types, on `Iterator` or `Option`,
/// and suggests `cloned()` or `copied()` instead
///
/// ### Why is this bad?
/// Readability, this can be written more concisely
///
/// ### Example
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.map(|i| *i);
/// ```
///
/// The correct use would be:
///
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.cloned();
/// ```
pub MAP_CLONE,
style,
"using `iterator.map(|x| x.clone())`, or dereferencing closures for `Copy` types"
}
declare_lint_pass!(MapClone => [MAP_CLONE]);
impl<'tcx> LateLintPass<'tcx> for MapClone {
fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
if e.span.from_expansion() {
return;
}
if_chain! {
if let hir::ExprKind::MethodCall(method, _, args, _) = e.kind;
if args.len() == 2;
if method.ident.name == sym::map;
let ty = cx.typeck_results().expr_ty(&args[0]);
if is_type_diagnostic_item(cx, ty, sym::Option) || is_trait_method(cx, e, sym::Iterator);
if let hir::ExprKind::Closure(_, _, body_id, _, _) = args[1].kind;
then {
let closure_body = cx.tcx.hir().body(body_id);
let closure_expr = remove_blocks(&closure_body.value);
match closure_body.params[0].pat.kind {
hir::PatKind::Ref(inner, hir::Mutability::Not) => if let hir::PatKind::Binding(
hir::BindingAnnotation::Unannotated,.., name, None
) = inner.kind {
if ident_eq(name, closure_expr) {
lint(cx, e.span, args[0].span, true);
}
},
hir::PatKind::Binding(hir::BindingAnnotation::Unannotated,.., name, None) => {
match closure_expr.kind {
hir::ExprKind::Unary(hir::UnOp::Deref, inner) => {
if ident_eq(name, inner) {
if let ty::Ref(.., Mutability::Not) = cx.typeck_results().expr_ty(inner).kind() {
lint(cx, e.span, args[0].span, true);
}
}
},
hir::ExprKind::MethodCall(method, _, [obj], _) => if_chain! {
if ident_eq(name, obj) && method.ident.name == sym::clone;
if let Some(fn_id) = cx.typeck_results().type_dependent_def_id(closure_expr.hir_id);
if let Some(trait_id) = cx.tcx.trait_of_item(fn_id);
if cx.tcx.lang_items().clone_trait().map_or(false, |id| id == trait_id);
// no autoderefs
if!cx.typeck_results().expr_adjustments(obj).iter()
.any(|a| matches!(a.kind, Adjust::Deref(Some(..))));
then {
let obj_ty = cx.typeck_results().expr_ty(obj);
if let ty::Ref(_, ty, mutability) = obj_ty.kind() {
if matches!(mutability, Mutability::Not) {
let copy = is_copy(cx, ty);
lint(cx, e.span, args[0].span, copy);
}
} else {
lint_needless_cloning(cx, e.span, args[0].span);
}
}
},
_ => {},
}
},
_ => {},
}
}
}
}
}
fn ident_eq(name: Ident, path: &hir::Expr<'_>) -> bool {
if let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = path.kind {
path.segments.len() == 1 && path.segments[0].ident == name
} else {
false
}
}
fn lint_needless_cloning(cx: &LateContext<'_>, root: Span, receiver: Span) {
span_lint_and_sugg(
cx,
MAP_CLONE,
root.trim_start(receiver).unwrap(),
"you are needlessly cloning iterator elements",
"remove the `map` call",
String::new(),
Applicability::MachineApplicable,
);
}
fn lint(cx: &LateContext<'_>, replace: Span, root: Span, copied: bool) {
let mut applicability = Applicability::MachineApplicable;
if copied {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace,
"you are using an explicit closure for copying elements",
"consider calling the dedicated `copied` method",
format!(
"{}.copied()",
snippet_with_applicability(cx, root, "..", &mut applicability)
),
applicability,
);
} else |
}
| {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace,
"you are using an explicit closure for cloning elements",
"consider calling the dedicated `cloned` method",
format!(
"{}.cloned()",
snippet_with_applicability(cx, root, "..", &mut applicability)
),
applicability,
);
} | conditional_block |
map_clone.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::is_trait_method;
use clippy_utils::remove_blocks;
use clippy_utils::source::snippet_with_applicability;
use clippy_utils::ty::{is_copy, is_type_diagnostic_item};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::Mutability;
use rustc_middle::ty;
use rustc_middle::ty::adjustment::Adjust;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of `map(|x| x.clone())` or
/// dereferencing closures for `Copy` types, on `Iterator` or `Option`,
/// and suggests `cloned()` or `copied()` instead
///
/// ### Why is this bad?
/// Readability, this can be written more concisely
///
/// ### Example
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.map(|i| *i);
/// ```
///
/// The correct use would be:
///
/// ```rust
/// let x = vec![42, 43];
/// let y = x.iter();
/// let z = y.cloned();
/// ```
pub MAP_CLONE,
style,
"using `iterator.map(|x| x.clone())`, or dereferencing closures for `Copy` types"
}
declare_lint_pass!(MapClone => [MAP_CLONE]);
impl<'tcx> LateLintPass<'tcx> for MapClone {
fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
if e.span.from_expansion() {
return;
}
if_chain! {
if let hir::ExprKind::MethodCall(method, _, args, _) = e.kind;
if args.len() == 2;
if method.ident.name == sym::map;
let ty = cx.typeck_results().expr_ty(&args[0]);
if is_type_diagnostic_item(cx, ty, sym::Option) || is_trait_method(cx, e, sym::Iterator);
if let hir::ExprKind::Closure(_, _, body_id, _, _) = args[1].kind;
then {
let closure_body = cx.tcx.hir().body(body_id);
let closure_expr = remove_blocks(&closure_body.value);
match closure_body.params[0].pat.kind {
hir::PatKind::Ref(inner, hir::Mutability::Not) => if let hir::PatKind::Binding(
hir::BindingAnnotation::Unannotated,.., name, None
) = inner.kind {
if ident_eq(name, closure_expr) {
lint(cx, e.span, args[0].span, true);
}
},
hir::PatKind::Binding(hir::BindingAnnotation::Unannotated,.., name, None) => {
match closure_expr.kind {
hir::ExprKind::Unary(hir::UnOp::Deref, inner) => {
if ident_eq(name, inner) {
if let ty::Ref(.., Mutability::Not) = cx.typeck_results().expr_ty(inner).kind() {
lint(cx, e.span, args[0].span, true);
}
}
},
hir::ExprKind::MethodCall(method, _, [obj], _) => if_chain! {
if ident_eq(name, obj) && method.ident.name == sym::clone;
if let Some(fn_id) = cx.typeck_results().type_dependent_def_id(closure_expr.hir_id);
if let Some(trait_id) = cx.tcx.trait_of_item(fn_id);
if cx.tcx.lang_items().clone_trait().map_or(false, |id| id == trait_id);
// no autoderefs
if!cx.typeck_results().expr_adjustments(obj).iter()
.any(|a| matches!(a.kind, Adjust::Deref(Some(..))));
then {
let obj_ty = cx.typeck_results().expr_ty(obj);
if let ty::Ref(_, ty, mutability) = obj_ty.kind() {
if matches!(mutability, Mutability::Not) {
let copy = is_copy(cx, ty);
lint(cx, e.span, args[0].span, copy);
}
} else {
lint_needless_cloning(cx, e.span, args[0].span);
}
}
},
_ => {},
}
},
_ => {},
}
}
}
}
}
fn ident_eq(name: Ident, path: &hir::Expr<'_>) -> bool {
if let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = path.kind {
path.segments.len() == 1 && path.segments[0].ident == name
} else {
false
}
}
fn lint_needless_cloning(cx: &LateContext<'_>, root: Span, receiver: Span) {
span_lint_and_sugg(
cx,
MAP_CLONE,
root.trim_start(receiver).unwrap(),
"you are needlessly cloning iterator elements",
"remove the `map` call",
String::new(),
Applicability::MachineApplicable,
);
}
fn lint(cx: &LateContext<'_>, replace: Span, root: Span, copied: bool) | "you are using an explicit closure for cloning elements",
"consider calling the dedicated `cloned` method",
format!(
"{}.cloned()",
snippet_with_applicability(cx, root, "..", &mut applicability)
),
applicability,
);
}
}
| {
let mut applicability = Applicability::MachineApplicable;
if copied {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace,
"you are using an explicit closure for copying elements",
"consider calling the dedicated `copied` method",
format!(
"{}.copied()",
snippet_with_applicability(cx, root, "..", &mut applicability)
),
applicability,
);
} else {
span_lint_and_sugg(
cx,
MAP_CLONE,
replace, | identifier_body |
gamma.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679
//! The Gamma and derived distributions.
use self::GammaRepr::*;
use self::ChiSquaredRepr::*;
use core::num::Float;
use {Rng, Open01};
use super::normal::StandardNormal;
use super::{IndependentSample, Sample, Exp};
/// The Gamma distribution `Gamma(shape, scale)` distribution.
///
/// The density function of this distribution is
///
/// ```text
/// f(x) = x^(k - 1) * exp(-x / θ) / (Γ(k) * θ^k)
/// ```
///
/// where `Γ` is the Gamma function, `k` is the shape and `θ` is the
/// scale and both `k` and `θ` are strictly positive.
///
/// The algorithm used is that described by Marsaglia & Tsang 2000[1],
/// falling back to directly sampling from an Exponential for `shape
/// == 1`, and using the boosting technique described in [1] for
/// `shape < 1`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{IndependentSample, Gamma};
///
/// let gamma = Gamma::new(2.0, 5.0);
/// let v = gamma.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a Gamma(2, 5) distribution", v);
/// ```
///
/// [1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method
/// for Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
/// (September 2000),
/// 363-372. DOI:[10.1145/358407.358414](http://doi.acm.org/10.1145/358407.358414)
pub struct Gamma {
repr: GammaRepr,
}
enum GammaRepr {
Large(GammaLargeShape),
One(Exp),
Small(GammaSmallShape)
}
// These two helpers could be made public, but saving the
// match-on-Gamma-enum branch from using them directly (e.g. if one
// knows that the shape is always > 1) doesn't appear to be much
// faster.
/// Gamma distribution where the shape parameter is less than 1.
///
/// Note, samples from this require a compulsory floating-point `pow`
/// call, which makes it significantly slower than sampling from a
/// gamma distribution where the shape parameter is greater than or
/// equal to 1.
/// | /// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaSmallShape {
inv_shape: f64,
large_shape: GammaLargeShape
}
/// Gamma distribution where the shape parameter is larger than 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaLargeShape {
scale: f64,
c: f64,
d: f64
}
impl Gamma {
/// Construct an object representing the `Gamma(shape, scale)`
/// distribution.
///
/// Panics if `shape <= 0` or `scale <= 0`.
pub fn new(shape: f64, scale: f64) -> Gamma {
assert!(shape > 0.0, "Gamma::new called with shape <= 0");
assert!(scale > 0.0, "Gamma::new called with scale <= 0");
let repr = match shape {
1.0 => One(Exp::new(1.0 / scale)),
0.0... 1.0 => Small(GammaSmallShape::new_raw(shape, scale)),
_ => Large(GammaLargeShape::new_raw(shape, scale))
};
Gamma { repr: repr }
}
}
impl GammaSmallShape {
fn new_raw(shape: f64, scale: f64) -> GammaSmallShape {
GammaSmallShape {
inv_shape: 1. / shape,
large_shape: GammaLargeShape::new_raw(shape + 1.0, scale)
}
}
}
impl GammaLargeShape {
fn new_raw(shape: f64, scale: f64) -> GammaLargeShape {
let d = shape - 1. / 3.;
GammaLargeShape {
scale: scale,
c: 1. / (9. * d).sqrt(),
d: d
}
}
}
impl Sample<f64> for Gamma {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl Sample<f64> for GammaSmallShape {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl Sample<f64> for GammaLargeShape {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for Gamma {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
match self.repr {
Small(ref g) => g.ind_sample(rng),
One(ref g) => g.ind_sample(rng),
Large(ref g) => g.ind_sample(rng),
}
}
}
impl IndependentSample<f64> for GammaSmallShape {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
let Open01(u) = rng.gen::<Open01<f64>>();
self.large_shape.ind_sample(rng) * u.powf(self.inv_shape)
}
}
impl IndependentSample<f64> for GammaLargeShape {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
loop {
let StandardNormal(x) = rng.gen::<StandardNormal>();
let v_cbrt = 1.0 + self.c * x;
if v_cbrt <= 0.0 { // a^3 <= 0 iff a <= 0
continue
}
let v = v_cbrt * v_cbrt * v_cbrt;
let Open01(u) = rng.gen::<Open01<f64>>();
let x_sqr = x * x;
if u < 1.0 - 0.0331 * x_sqr * x_sqr ||
u.ln() < 0.5 * x_sqr + self.d * (1.0 - v + v.ln()) {
return self.d * v * self.scale
}
}
}
}
/// The chi-squared distribution `χ²(k)`, where `k` is the degrees of
/// freedom.
///
/// For `k > 0` integral, this distribution is the sum of the squares
/// of `k` independent standard normal random variables. For other
/// `k`, this uses the equivalent characterisation `χ²(k) = Gamma(k/2,
/// 2)`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{ChiSquared, IndependentSample};
///
/// let chi = ChiSquared::new(11.0);
/// let v = chi.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a χ²(11) distribution", v)
/// ```
pub struct ChiSquared {
repr: ChiSquaredRepr,
}
enum ChiSquaredRepr {
// k == 1, Gamma(alpha,..) is particularly slow for alpha < 1,
// e.g. when alpha = 1/2 as it would be for this case, so special-
// casing and using the definition of N(0,1)^2 is faster.
DoFExactlyOne,
DoFAnythingElse(Gamma),
}
impl ChiSquared {
/// Create a new chi-squared distribution with degrees-of-freedom
/// `k`. Panics if `k < 0`.
pub fn new(k: f64) -> ChiSquared {
let repr = if k == 1.0 {
DoFExactlyOne
} else {
assert!(k > 0.0, "ChiSquared::new called with `k` < 0");
DoFAnythingElse(Gamma::new(0.5 * k, 2.0))
};
ChiSquared { repr: repr }
}
}
impl Sample<f64> for ChiSquared {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for ChiSquared {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
match self.repr {
DoFExactlyOne => {
// k == 1 => N(0,1)^2
let StandardNormal(norm) = rng.gen::<StandardNormal>();
norm * norm
}
DoFAnythingElse(ref g) => g.ind_sample(rng)
}
}
}
/// The Fisher F distribution `F(m, n)`.
///
/// This distribution is equivalent to the ratio of two normalised
/// chi-squared distributions, that is, `F(m,n) = (χ²(m)/m) /
/// (χ²(n)/n)`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{FisherF, IndependentSample};
///
/// let f = FisherF::new(2.0, 32.0);
/// let v = f.ind_sample(&mut rand::thread_rng());
/// println!("{} is from an F(2, 32) distribution", v)
/// ```
pub struct FisherF {
numer: ChiSquared,
denom: ChiSquared,
// denom_dof / numer_dof so that this can just be a straight
// multiplication, rather than a division.
dof_ratio: f64,
}
impl FisherF {
/// Create a new `FisherF` distribution, with the given
/// parameter. Panics if either `m` or `n` are not positive.
pub fn new(m: f64, n: f64) -> FisherF {
assert!(m > 0.0, "FisherF::new called with `m < 0`");
assert!(n > 0.0, "FisherF::new called with `n < 0`");
FisherF {
numer: ChiSquared::new(m),
denom: ChiSquared::new(n),
dof_ratio: n / m
}
}
}
impl Sample<f64> for FisherF {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for FisherF {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
self.numer.ind_sample(rng) / self.denom.ind_sample(rng) * self.dof_ratio
}
}
/// The Student t distribution, `t(nu)`, where `nu` is the degrees of
/// freedom.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{StudentT, IndependentSample};
///
/// let t = StudentT::new(11.0);
/// let v = t.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a t(11) distribution", v)
/// ```
pub struct StudentT {
chi: ChiSquared,
dof: f64
}
impl StudentT {
/// Create a new Student t distribution with `n` degrees of
/// freedom. Panics if `n <= 0`.
pub fn new(n: f64) -> StudentT {
assert!(n > 0.0, "StudentT::new called with `n <= 0`");
StudentT {
chi: ChiSquared::new(n),
dof: n
}
}
}
impl Sample<f64> for StudentT {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for StudentT {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
let StandardNormal(norm) = rng.gen::<StandardNormal>();
norm * (self.dof / self.chi.ind_sample(rng)).sqrt()
}
}
#[cfg(test)]
mod test {
use distributions::{Sample, IndependentSample};
use super::{ChiSquared, StudentT, FisherF};
#[test]
fn test_chi_squared_one() {
let mut chi = ChiSquared::new(1.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_small() {
let mut chi = ChiSquared::new(0.5);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_large() {
let mut chi = ChiSquared::new(30.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
#[should_fail]
fn test_chi_squared_invalid_dof() {
ChiSquared::new(-1.0);
}
#[test]
fn test_f() {
let mut f = FisherF::new(2.0, 32.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
f.sample(&mut rng);
f.ind_sample(&mut rng);
}
}
#[test]
fn test_t() {
let mut t = StudentT::new(11.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
t.sample(&mut rng);
t.ind_sample(&mut rng);
}
}
}
#[cfg(test)]
mod bench {
extern crate test;
use std::prelude::v1::*;
use self::test::Bencher;
use std::mem::size_of;
use distributions::IndependentSample;
use super::Gamma;
#[bench]
fn bench_gamma_large_shape(b: &mut Bencher) {
let gamma = Gamma::new(10., 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
#[bench]
fn bench_gamma_small_shape(b: &mut Bencher) {
let gamma = Gamma::new(0.1, 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
} | random_line_split |
|
gamma.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679
//! The Gamma and derived distributions.
use self::GammaRepr::*;
use self::ChiSquaredRepr::*;
use core::num::Float;
use {Rng, Open01};
use super::normal::StandardNormal;
use super::{IndependentSample, Sample, Exp};
/// The Gamma distribution `Gamma(shape, scale)` distribution.
///
/// The density function of this distribution is
///
/// ```text
/// f(x) = x^(k - 1) * exp(-x / θ) / (Γ(k) * θ^k)
/// ```
///
/// where `Γ` is the Gamma function, `k` is the shape and `θ` is the
/// scale and both `k` and `θ` are strictly positive.
///
/// The algorithm used is that described by Marsaglia & Tsang 2000[1],
/// falling back to directly sampling from an Exponential for `shape
/// == 1`, and using the boosting technique described in [1] for
/// `shape < 1`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{IndependentSample, Gamma};
///
/// let gamma = Gamma::new(2.0, 5.0);
/// let v = gamma.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a Gamma(2, 5) distribution", v);
/// ```
///
/// [1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method
/// for Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
/// (September 2000),
/// 363-372. DOI:[10.1145/358407.358414](http://doi.acm.org/10.1145/358407.358414)
pub struct Gamma {
    // Shape-specific sampling strategy, chosen once in `Gamma::new`.
    repr: GammaRepr,
}
enum GammaRepr {
    // shape > 1: Marsaglia & Tsang's rejection method.
    Large(GammaLargeShape),
    // shape == 1: Gamma(1, scale) is exactly an exponential distribution.
    One(Exp),
    // 0 < shape < 1: boosted variant of the large-shape sampler.
    Small(GammaSmallShape)
}
// These two helpers could be made public, but saving the
// match-on-Gamma-enum branch from using them directly (e.g. if one
// knows that the shape is always > 1) doesn't appear to be much
// faster.
/// Gamma distribution where the shape parameter is less than 1.
///
/// Note, samples from this require a compulsory floating-point `pow`
/// call, which makes it significantly slower than sampling from a
/// gamma distribution where the shape parameter is greater than or
/// equal to 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaSmallShape {
    // 1 / shape; exponent of the boosting factor U^(1/shape).
    inv_shape: f64,
    // Sampler for Gamma(shape + 1, scale); its draws are boosted down.
    large_shape: GammaLargeShape
}
/// Gamma distribution where the shape parameter is larger than 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaLargeShape {
    scale: f64,
    // c = 1 / sqrt(9 * d), per Marsaglia & Tsang (2000).
    c: f64,
    // d = shape - 1/3, per Marsaglia & Tsang (2000).
    d: f64
}
impl Gamma {
    /// Construct an object representing the `Gamma(shape, scale)`
    /// distribution.
    ///
    /// Panics if `shape <= 0` or `scale <= 0`.
    pub fn new(shape: f64, scale: f64) -> Gamma {
        assert!(shape > 0.0, "Gamma::new called with shape <= 0");
        assert!(scale > 0.0, "Gamma::new called with scale <= 0");
        // Choose the sampler by shape: exact exponential at 1, the
        // boosted small-shape method on (0, 1), Marsaglia & Tsang above.
        let repr = if shape == 1.0 {
            One(Exp::new(1.0 / scale))
        } else if shape < 1.0 {
            Small(GammaSmallShape::new_raw(shape, scale))
        } else {
            Large(GammaLargeShape::new_raw(shape, scale))
        };
        Gamma { repr: repr }
    }
}
impl GammaSmallShape {
    fn new_raw(shape: f64, scale: f64) -> GammaSmallShape {
        // Delegate to a Gamma(shape + 1, scale) sampler; `ind_sample`
        // boosts its output with U^(1/shape) (Marsaglia & Tsang, 2000).
        GammaSmallShape {
            large_shape: GammaLargeShape::new_raw(shape + 1.0, scale),
            inv_shape: 1. / shape,
        }
    }
}
impl GammaLargeShape {
    fn new_raw(shape: f64, scale: f64) -> GammaLargeShape {
        // Precompute the Marsaglia & Tsang (2000) constants:
        // d = shape - 1/3 and c = 1 / sqrt(9 d).
        let d = shape - 1. / 3.;
        let c = 1. / (9. * d).sqrt();
        GammaLargeShape { scale: scale, c: c, d: d }
    }
}
impl Sample<f64> for Gamma {
    // Sampling keeps no mutable state; defer to the immutable sampler.
    fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl Sample<f64> for GammaSmallShape {
    // No mutable state needed; defer to the immutable sampler.
    fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl Sample<f64> for GammaLargeShape {
    // No mutable state needed; defer to the immutable sampler.
    fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for Gamma {
    fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
        // Forward to whichever shape-specific sampler `new` selected.
        match self.repr {
            Large(ref g) => g.ind_sample(rng),
            One(ref g) => g.ind_sample(rng),
            Small(ref g) => g.ind_sample(rng),
        }
    }
}
impl IndependentSample<f64> for GammaSmallShape {
    fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
        // Boosting step: a Gamma(shape + 1) draw times U^(1/shape) is
        // distributed Gamma(shape) (Marsaglia & Tsang, 2000). `u` is
        // drawn first to keep the RNG stream identical to the original.
        let Open01(u) = rng.gen::<Open01<f64>>();
        let boost = u.powf(self.inv_shape);
        self.large_shape.ind_sample(rng) * boost
    }
}
impl IndependentSample<f64> for GammaLargeShape {
    // Rejection sampler from Marsaglia & Tsang (2000): transform a
    // standard normal draw x into v = (1 + c*x)^3 and accept with a
    // cheap polynomial squeeze test, falling back to the exact log test.
    fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
        loop {
            let StandardNormal(x) = rng.gen::<StandardNormal>();
            let v_cbrt = 1.0 + self.c * x;
            if v_cbrt <= 0.0 { // a^3 <= 0 iff a <= 0
                continue
            }
            let v = v_cbrt * v_cbrt * v_cbrt;
            let Open01(u) = rng.gen::<Open01<f64>>();
            let x_sqr = x * x;
            // Accept via the fast squeeze bound, or via the exact
            // condition ln(u) < x²/2 + d(1 - v + ln v).
            if u < 1.0 - 0.0331 * x_sqr * x_sqr ||
                u.ln() < 0.5 * x_sqr + self.d * (1.0 - v + v.ln()) {
                return self.d * v * self.scale
            }
        }
    }
}
/// The chi-squared distribution `χ²(k)`, where `k` is the degrees of
/// freedom.
///
/// For `k > 0` integral, this distribution is the sum of the squares
/// of `k` independent standard normal random variables. For other
/// `k`, this uses the equivalent characterisation `χ²(k) = Gamma(k/2,
/// 2)`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{ChiSquared, IndependentSample};
///
/// let chi = ChiSquared::new(11.0);
/// let v = chi.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a χ²(11) distribution", v)
/// ```
pub struct ChiSquared {
    // Chosen at construction: the k == 1 special case or a Gamma sampler.
    repr: ChiSquaredRepr,
}
enum ChiSquaredRepr {
    // k == 1, Gamma(alpha,..) is particularly slow for alpha < 1,
    // e.g. when alpha = 1/2 as it would be for this case, so special-
    // casing and using the definition of N(0,1)^2 is faster.
    DoFExactlyOne,
    // Any other k > 0: χ²(k) = Gamma(k/2, 2).
    DoFAnythingElse(Gamma),
}
impl ChiSquared {
    /// Create a new chi-squared distribution with degrees-of-freedom
    /// `k`. Panics if `k <= 0`.
    pub fn new(k: f64) -> ChiSquared {
        let repr = if k == 1.0 {
            DoFExactlyOne
        } else {
            // The guard rejects any non-positive `k` (and NaN), so the
            // message says `<=` rather than the previous, inaccurate `<`.
            assert!(k > 0.0, "ChiSquared::new called with `k` <= 0");
            DoFAnythingElse(Gamma::new(0.5 * k, 2.0))
        };
        ChiSquared { repr: repr }
    }
}
impl Sample<f64> for ChiSquared {
    // No mutable state needed; defer to the immutable sampler.
    fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for ChiSquared {
    fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
        match self.repr {
            DoFAnythingElse(ref g) => g.ind_sample(rng),
            DoFExactlyOne => {
                // χ²(1) is exactly the square of a standard normal draw.
                let StandardNormal(norm) = rng.gen::<StandardNormal>();
                norm * norm
            }
        }
    }
}
/// The Fisher F distribution `F(m, n)`.
///
/// This distribution is equivalent to the ratio of two normalised
/// chi-squared distributions, that is, `F(m,n) = (χ²(m)/m) /
/// (χ²(n)/n)`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{FisherF, IndependentSample};
///
/// let f = FisherF::new(2.0, 32.0);
/// let v = f.ind_sample(&mut rand::thread_rng());
/// println!("{} is from an F(2, 32) distribution", v)
/// ```
pub struct FisherF {
    // χ²(m) sampler for the numerator.
    numer: ChiSquared,
    // χ²(n) sampler for the denominator.
    denom: ChiSquared,
    // denom_dof / numer_dof so that this can just be a straight
    // multiplication, rather than a division.
    dof_ratio: f64,
}
impl FisherF {
    /// Create a new `FisherF` distribution, with the given
    /// parameter. Panics if either `m` or `n` are not positive.
    pub fn new(m: f64, n: f64) -> FisherF {
        // The guards reject zero as well as negatives, so the messages
        // say `<= 0` rather than the previous, inaccurate `< 0`.
        assert!(m > 0.0, "FisherF::new called with `m <= 0`");
        assert!(n > 0.0, "FisherF::new called with `n <= 0`");
        FisherF {
            numer: ChiSquared::new(m),
            denom: ChiSquared::new(n),
            // Pre-computed so sampling multiplies instead of divides.
            dof_ratio: n / m
        }
    }
}
impl Sample<f64> for FisherF {
    // No mutable state needed; defer to the immutable sampler.
    fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for FisherF {
    fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
        // F = (χ²(m)/m) / (χ²(n)/n); the m/n normalisation is folded
        // into the pre-computed `dof_ratio`. Numerator drawn first to
        // preserve the RNG stream.
        let numer = self.numer.ind_sample(rng);
        let denom = self.denom.ind_sample(rng);
        numer / denom * self.dof_ratio
    }
}
/// The Student t distribution, `t(nu)`, where `nu` is the degrees of
/// freedom.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{StudentT, IndependentSample};
///
/// let t = StudentT::new(11.0);
/// let v = t.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a t(11) distribution", v)
/// ```
pub struct StudentT {
    // χ²(dof) sampler used in the denominator of the t draw.
    chi: ChiSquared,
    // Degrees of freedom, kept for the sqrt(dof / χ²) scaling.
    dof: f64
}
impl StudentT {
    /// Create a new Student t distribution with `n` degrees of
    /// freedom. Panics if `n <= 0`.
    pub fn new(n: f64) -> StudentT {
        assert!(n > 0.0, "StudentT::new called with `n <= 0`");
        // Keep `n` alongside the χ² sampler for the scaling in
        // `ind_sample`.
        StudentT { chi: ChiSquared::new(n), dof: n }
    }
}
impl Sample<f64> for StudentT {
    // No mutable state needed; defer to the immutable sampler.
    fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for StudentT {
    fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
        // t(ν) = Z * sqrt(ν / χ²(ν)) for standard normal Z; the normal
        // draw happens first to preserve the RNG stream.
        let StandardNormal(norm) = rng.gen::<StandardNormal>();
        let scaling = (self.dof / self.chi.ind_sample(rng)).sqrt();
        norm * scaling
    }
}
#[cfg(test)]
mod test {
    // Smoke tests: draw many samples through both the stateful `Sample`
    // and stateless `IndependentSample` entry points; a panic or hang in
    // the rejection loops would fail these. No distributional checks.
    use distributions::{Sample, IndependentSample};
    use super::{ChiSquared, StudentT, FisherF};
    #[test]
    fn test_chi_squared_one() {
        let mut chi = ChiSquared::new(1.0);
        let mut rng = ::test::rng();
        for _ in 0..1000 {
            chi.sample(&mut rng);
            chi.ind_sample(&mut rng);
        }
    }
    #[test]
    fn test_chi_squared_small() {
        let mut chi = ChiSquared::new(0.5);
        let mut rng = ::test::rng();
        for _ in 0..1000 {
            chi.sample(&mut rng);
            chi.ind_sample(&mut rng);
        }
    }
    #[test]
    fn test_chi_squared_large() {
        let mut chi = ChiSquared::new(30.0);
        let mut rng = ::test::rng();
        for _ in 0..1000 {
            chi.sample(&mut rng);
            chi.ind_sample(&mut rng);
        }
    }
    #[test]
    #[should_fail]
    fn test_chi_squared_invalid_dof() {
        ChiSquared::new(-1.0);
    }
    #[test]
    fn test_f() {
        let mut f = FisherF::new(2.0, 32.0);
        let mut rng = ::test::rng();
        for _ in 0..1000 {
            f.sample(&mut rng);
            f.ind_sample(&mut rng);
        }
    }
    #[test]
    fn test_t() {
        let mut t = StudentT::new(11.0);
        let mut rng = ::test::rng();
        for _ in 0..1000 {
            t.sample(&mut rng);
            t.ind_sample(&mut rng);
        }
    }
}
#[cfg(test)]
mod bench {
extern crate test;
use std::prelude::v1::*;
use self::test::Bencher;
use std::mem::size_of;
use distributions::IndependentSample;
use super::Gamma;
#[bench]
fn bench_gamma_larg | ) {
let gamma = Gamma::new(10., 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
#[bench]
fn bench_gamma_small_shape(b: &mut Bencher) {
let gamma = Gamma::new(0.1, 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
}
| e_shape(b: &mut Bencher | identifier_name |
gamma.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679
//! The Gamma and derived distributions.
use self::GammaRepr::*;
use self::ChiSquaredRepr::*;
use core::num::Float;
use {Rng, Open01};
use super::normal::StandardNormal;
use super::{IndependentSample, Sample, Exp};
/// The Gamma distribution `Gamma(shape, scale)` distribution.
///
/// The density function of this distribution is
///
/// ```text
/// f(x) = x^(k - 1) * exp(-x / θ) / (Γ(k) * θ^k)
/// ```
///
/// where `Γ` is the Gamma function, `k` is the shape and `θ` is the
/// scale and both `k` and `θ` are strictly positive.
///
/// The algorithm used is that described by Marsaglia & Tsang 2000[1],
/// falling back to directly sampling from an Exponential for `shape
/// == 1`, and using the boosting technique described in [1] for
/// `shape < 1`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{IndependentSample, Gamma};
///
/// let gamma = Gamma::new(2.0, 5.0);
/// let v = gamma.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a Gamma(2, 5) distribution", v);
/// ```
///
/// [1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method
/// for Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
/// (September 2000),
/// 363-372. DOI:[10.1145/358407.358414](http://doi.acm.org/10.1145/358407.358414)
pub struct Gamma {
repr: GammaRepr,
}
enum GammaRepr {
Large(GammaLargeShape),
One(Exp),
Small(GammaSmallShape)
}
// These two helpers could be made public, but saving the
// match-on-Gamma-enum branch from using them directly (e.g. if one
// knows that the shape is always > 1) doesn't appear to be much
// faster.
/// Gamma distribution where the shape parameter is less than 1.
///
/// Note, samples from this require a compulsory floating-point `pow`
/// call, which makes it significantly slower than sampling from a
/// gamma distribution where the shape parameter is greater than or
/// equal to 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaSmallShape {
inv_shape: f64,
large_shape: GammaLargeShape
}
/// Gamma distribution where the shape parameter is larger than 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaLargeShape {
scale: f64,
c: f64,
d: f64
}
impl Gamma {
    /// Construct an object representing the `Gamma(shape, scale)`
    /// distribution.
    ///
    /// Panics if `shape <= 0` or `scale <= 0`.
    pub fn new(shape: f64, scale: f64) -> Gamma {
        assert!(shape > 0.0, "Gamma::new called with shape <= 0");
        assert!(scale > 0.0, "Gamma::new called with scale <= 0");
        // Choose by shape: exact exponential at 1, boosted small-shape
        // sampler on (0, 1), Marsaglia & Tsang otherwise.
        let repr = match shape {
            1.0 => One(Exp::new(1.0 / scale)),
            0.0... 1.0 => Small(GammaSmallShape::new_raw(shape, scale)),
            _ => Large(GammaLargeShape::new_raw(shape, scale))
        };
        Gamma { repr: repr }
    }
}
impl GammaSmallShape {
fn new_raw(shape: f64, scale: f64) -> GammaSmallShape {
GammaSmallShape {
inv_shape: 1. / shape,
large_shape: GammaLargeShape::new_raw(shape + 1.0, scale)
}
}
}
impl GammaLargeShape {
fn new_raw(shape: f64, scale: f64) -> GammaLargeShape {
let d = shape - 1. / 3.;
GammaLargeShape {
scale: scale,
c: 1. / (9. * d).sqrt(),
d: d
}
}
}
impl Sample<f64> for Gamma {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self | l Sample<f64> for GammaSmallShape {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl Sample<f64> for GammaLargeShape {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for Gamma {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
match self.repr {
Small(ref g) => g.ind_sample(rng),
One(ref g) => g.ind_sample(rng),
Large(ref g) => g.ind_sample(rng),
}
}
}
impl IndependentSample<f64> for GammaSmallShape {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
let Open01(u) = rng.gen::<Open01<f64>>();
self.large_shape.ind_sample(rng) * u.powf(self.inv_shape)
}
}
impl IndependentSample<f64> for GammaLargeShape {
    // Rejection sampler from Marsaglia & Tsang (2000): transform a
    // standard normal draw x into v = (1 + c*x)^3 and accept with a
    // cheap polynomial squeeze test, falling back to the exact log test.
    fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
        loop {
            let StandardNormal(x) = rng.gen::<StandardNormal>();
            let v_cbrt = 1.0 + self.c * x;
            if v_cbrt <= 0.0 { // a^3 <= 0 iff a <= 0
                continue
            }
            let v = v_cbrt * v_cbrt * v_cbrt;
            let Open01(u) = rng.gen::<Open01<f64>>();
            let x_sqr = x * x;
            // Accept via the fast squeeze bound, or via the exact
            // condition ln(u) < x²/2 + d(1 - v + ln v).
            if u < 1.0 - 0.0331 * x_sqr * x_sqr ||
                u.ln() < 0.5 * x_sqr + self.d * (1.0 - v + v.ln()) {
                return self.d * v * self.scale
            }
        }
    }
}
/// The chi-squared distribution `χ²(k)`, where `k` is the degrees of
/// freedom.
///
/// For `k > 0` integral, this distribution is the sum of the squares
/// of `k` independent standard normal random variables. For other
/// `k`, this uses the equivalent characterisation `χ²(k) = Gamma(k/2,
/// 2)`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{ChiSquared, IndependentSample};
///
/// let chi = ChiSquared::new(11.0);
/// let v = chi.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a χ²(11) distribution", v)
/// ```
pub struct ChiSquared {
repr: ChiSquaredRepr,
}
enum ChiSquaredRepr {
// k == 1, Gamma(alpha,..) is particularly slow for alpha < 1,
// e.g. when alpha = 1/2 as it would be for this case, so special-
// casing and using the definition of N(0,1)^2 is faster.
DoFExactlyOne,
DoFAnythingElse(Gamma),
}
impl ChiSquared {
    /// Create a new chi-squared distribution with degrees-of-freedom
    /// `k`. Panics if `k <= 0`.
    pub fn new(k: f64) -> ChiSquared {
        let repr = if k == 1.0 {
            DoFExactlyOne
        } else {
            // The guard rejects any non-positive `k` (and NaN), so the
            // message says `<=` rather than the previous, inaccurate `<`.
            assert!(k > 0.0, "ChiSquared::new called with `k` <= 0");
            DoFAnythingElse(Gamma::new(0.5 * k, 2.0))
        };
        ChiSquared { repr: repr }
    }
}
impl Sample<f64> for ChiSquared {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for ChiSquared {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
match self.repr {
DoFExactlyOne => {
// k == 1 => N(0,1)^2
let StandardNormal(norm) = rng.gen::<StandardNormal>();
norm * norm
}
DoFAnythingElse(ref g) => g.ind_sample(rng)
}
}
}
/// The Fisher F distribution `F(m, n)`.
///
/// This distribution is equivalent to the ratio of two normalised
/// chi-squared distributions, that is, `F(m,n) = (χ²(m)/m) /
/// (χ²(n)/n)`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{FisherF, IndependentSample};
///
/// let f = FisherF::new(2.0, 32.0);
/// let v = f.ind_sample(&mut rand::thread_rng());
/// println!("{} is from an F(2, 32) distribution", v)
/// ```
pub struct FisherF {
numer: ChiSquared,
denom: ChiSquared,
// denom_dof / numer_dof so that this can just be a straight
// multiplication, rather than a division.
dof_ratio: f64,
}
impl FisherF {
    /// Create a new `FisherF` distribution, with the given
    /// parameter. Panics if either `m` or `n` are not positive.
    pub fn new(m: f64, n: f64) -> FisherF {
        // The guards reject zero as well as negatives, so the messages
        // say `<= 0` rather than the previous, inaccurate `< 0`.
        assert!(m > 0.0, "FisherF::new called with `m <= 0`");
        assert!(n > 0.0, "FisherF::new called with `n <= 0`");
        FisherF {
            numer: ChiSquared::new(m),
            denom: ChiSquared::new(n),
            // Pre-computed so sampling multiplies instead of divides.
            dof_ratio: n / m
        }
    }
}
impl Sample<f64> for FisherF {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for FisherF {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
self.numer.ind_sample(rng) / self.denom.ind_sample(rng) * self.dof_ratio
}
}
/// The Student t distribution, `t(nu)`, where `nu` is the degrees of
/// freedom.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{StudentT, IndependentSample};
///
/// let t = StudentT::new(11.0);
/// let v = t.ind_sample(&mut rand::thread_rng());
/// println!("{} is from a t(11) distribution", v)
/// ```
pub struct StudentT {
chi: ChiSquared,
dof: f64
}
impl StudentT {
/// Create a new Student t distribution with `n` degrees of
/// freedom. Panics if `n <= 0`.
pub fn new(n: f64) -> StudentT {
assert!(n > 0.0, "StudentT::new called with `n <= 0`");
StudentT {
chi: ChiSquared::new(n),
dof: n
}
}
}
impl Sample<f64> for StudentT {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 { self.ind_sample(rng) }
}
impl IndependentSample<f64> for StudentT {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
let StandardNormal(norm) = rng.gen::<StandardNormal>();
norm * (self.dof / self.chi.ind_sample(rng)).sqrt()
}
}
#[cfg(test)]
mod test {
use distributions::{Sample, IndependentSample};
use super::{ChiSquared, StudentT, FisherF};
#[test]
fn test_chi_squared_one() {
let mut chi = ChiSquared::new(1.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_small() {
let mut chi = ChiSquared::new(0.5);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_large() {
let mut chi = ChiSquared::new(30.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
#[should_fail]
fn test_chi_squared_invalid_dof() {
ChiSquared::new(-1.0);
}
#[test]
fn test_f() {
let mut f = FisherF::new(2.0, 32.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
f.sample(&mut rng);
f.ind_sample(&mut rng);
}
}
#[test]
fn test_t() {
let mut t = StudentT::new(11.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
t.sample(&mut rng);
t.ind_sample(&mut rng);
}
}
}
#[cfg(test)]
mod bench {
extern crate test;
use std::prelude::v1::*;
use self::test::Bencher;
use std::mem::size_of;
use distributions::IndependentSample;
use super::Gamma;
#[bench]
fn bench_gamma_large_shape(b: &mut Bencher) {
let gamma = Gamma::new(10., 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
#[bench]
fn bench_gamma_small_shape(b: &mut Bencher) {
let gamma = Gamma::new(0.1, 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
}
| .ind_sample(rng) }
}
imp | identifier_body |
get_state_events_for_empty_key.rs | //! [GET /_matrix/client/r0/rooms/{roomId}/state/{eventType}](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-rooms-roomid-state-eventtype)
use ruma_api::ruma_api;
use ruma_events::EventType;
use ruma_identifiers::RoomId;
use serde_json::value::RawValue as RawJsonValue;
ruma_api! {
metadata {
description: "Get state events of a given type associated with the empty key.",
method: GET,
name: "get_state_events_for_empty_key",
path: "/_matrix/client/r0/rooms/:room_id/state/:event_type",
rate_limited: false,
requires_authentication: true,
}
request {
/// The room to look up the state for.
#[ruma_api(path)]
pub room_id: RoomId,
/// The type of state to look up. | response {
/// The content of the state event.
///
/// To create a `Box<RawJsonValue>`, use `serde_json::value::to_raw_value`.
#[ruma_api(body)]
pub content: Box<RawJsonValue>,
}
error: crate::Error
} | #[ruma_api(path)]
pub event_type: EventType,
}
| random_line_split |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::path::Path;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use stack_config::StackConfig;
use tracing::{event, trace, Level};
use edenfs_error::EdenFsError;
#[derive(Serialize, Deserialize, StackConfig, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Core {
    // Optional override of the EdenFS state directory; None when no
    // configuration layer sets it. NOTE(review): with the camelCase
    // rename the TOML key is presumably "edenDirectory" — confirm.
    #[stack(default)]
    eden_directory: Option<String>,
}
#[derive(Serialize, Deserialize, StackConfig, Debug)]
pub struct EdenFsConfig {
    #[stack(nested)]
    core: Core,
    // Catch-all for keys not modelled above; flattened into the top
    // level and combined across layers by `merge_table` below.
    #[stack(merge = "merge_table")]
    #[serde(flatten)]
    other: toml::value::Table,
}
/// Merge the TOML table `rhs` into `lhs`, recursing into nested tables.
///
/// When both sides hold a table under the same key the tables are merged
/// key-by-key; any other collision (non-table values, or mismatched
/// types) is resolved in favour of `rhs`. Keys only present in `rhs` are
/// inserted as-is.
fn merge_table(lhs: &mut toml::value::Table, rhs: toml::value::Table) {
    for (key, value) in rhs.into_iter() {
        if let Some(lhs_value) = lhs.get_mut(&key) {
            match (lhs_value, value) {
                // Both values are tables: merge them recursively.
                // Pattern matching replaces the previous
                // `is_table()` check plus `try_into().unwrap()`
                // round-trip, removing the panic path entirely.
                (toml::Value::Table(lhs_table), toml::Value::Table(rhs_table)) => {
                    merge_table(lhs_table, rhs_table)
                }
                // Otherwise prefer the right-hand side.
                (lhs_slot, value) => *lhs_slot = value,
            }
        } else {
            // Key does not exist in lhs: insert the rhs value unchanged.
            lhs.insert(key, value);
        }
    }
}
// Read one TOML config file and feed it to the stack-config loader.
fn load_path(loader: &mut EdenFsConfigLoader, path: &Path) -> Result<()> {
    // Read raw bytes, validate them as UTF-8, then parse as TOML.
    let bytes = std::fs::read(&path)?;
    let content = String::from_utf8(bytes)?;
    trace!(?content, ?path, "Loading config");
    loader.load(toml::from_str(&content)?);
    Ok(())
}
// Load the system-wide config file, `<etc_dir>/edenfs.rc`.
fn load_system(loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> {
    load_path(loader, &etc_dir.join("edenfs.rc"))
}
// Load every `*.toml` drop-in under `<etc_dir>/config.d`.
//
// Dotfiles and non-`.toml` entries are ignored. An unreadable directory
// entry or an unparseable file is logged and skipped so one bad drop-in
// cannot block the rest; only failure to list the directory is an error.
fn load_system_rcs(loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> {
    let rcs_dir = etc_dir.join("config.d");
    let entries = std::fs::read_dir(&rcs_dir)
        .with_context(|| format!("Unable to read configuration from {:?}", rcs_dir))?;
    for rc in entries {
        let rc = match rc {
            Ok(rc) => rc,
            Err(e) => {
                // Directory entry itself could not be read; skip it.
                event!(
                    Level::INFO,
                    "Unable to read configuration, skipped: {:?}",
                    e
                );
                continue;
            }
        };
        let name = rc.file_name();
        // Skip entries whose names are not valid UTF-8.
        let name = if let Some(name) = name.to_str() {
            name
        } else {
            continue;
        };
        if name.starts_with('.') ||!name.ends_with(".toml") {
            continue;
        }
        // Parse failures are logged at DEBUG and otherwise ignored.
        if let Err(e) = load_path(loader, &rc.path()) {
            event!(
                Level::DEBUG,
                "Not able to load '{}': {:?}",
                rc.path().display(),
                e
            );
        }
    }
    Ok(())
}
// Load the per-user overrides from `<home_dir>/.edenrc`.
fn load_user(loader: &mut EdenFsConfigLoader, home_dir: &Path) -> Result<()> {
    let home_rc = home_dir.join(".edenrc");
    load_path(loader, &home_rc)
}
pub fn load_config(
etc_eden_dir: &Path,
home_dir: Option<&Path>,
) -> Result<EdenFsConfig, EdenFsError> {
let mut loader = EdenFsConfig::loader();
if let Err(e) = load_system(&mut loader, &etc_eden_dir) {
event!(
Level::INFO,
etc_eden_dir =?etc_eden_dir,
"Unable to load system configuration, skipped: {:?}",
e
);
} else {
event!(Level::DEBUG, "System configuration loaded");
}
if let Err(e) = load_system_rcs(&mut loader, &etc_eden_dir) | else {
event!(Level::DEBUG, "System RC configurations loaded");
}
if let Some(home) = home_dir {
if let Err(e) = load_user(&mut loader, &home) {
event!(Level::INFO, home =?home, "Unable to load user configuration, skipped: {:?}", e);
} else {
event!(Level::DEBUG, "User configuration loaded");
}
} else {
event!(
Level::INFO,
"Unable to find home dir. User configuration is not loaded."
);
}
Ok(loader.build().map_err(EdenFsError::ConfigurationError)?)
}
| {
event!(
Level::INFO,
etc_eden_dir = ?etc_eden_dir,
"Unable to load system RC configurations, skipped: {:?}",
e
);
} | conditional_block |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::path::Path;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use stack_config::StackConfig;
use tracing::{event, trace, Level};
use edenfs_error::EdenFsError;
#[derive(Serialize, Deserialize, StackConfig, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Core {
#[stack(default)]
eden_directory: Option<String>,
}
#[derive(Serialize, Deserialize, StackConfig, Debug)]
pub struct EdenFsConfig {
#[stack(nested)]
core: Core,
#[stack(merge = "merge_table")]
#[serde(flatten)]
other: toml::value::Table,
}
/// Merge the TOML table `rhs` into `lhs`, recursing into nested tables.
///
/// When both sides hold a table under the same key the tables are merged
/// key-by-key; any other collision (non-table values, or mismatched
/// types) is resolved in favour of `rhs`. Keys only present in `rhs` are
/// inserted as-is.
fn merge_table(lhs: &mut toml::value::Table, rhs: toml::value::Table) {
    for (key, value) in rhs.into_iter() {
        if let Some(lhs_value) = lhs.get_mut(&key) {
            match (lhs_value, value) {
                // Both values are tables: merge them recursively.
                // Pattern matching replaces the previous
                // `is_table()` check plus `try_into().unwrap()`
                // round-trip, removing the panic path entirely.
                (toml::Value::Table(lhs_table), toml::Value::Table(rhs_table)) => {
                    merge_table(lhs_table, rhs_table)
                }
                // Otherwise prefer the right-hand side.
                (lhs_slot, value) => *lhs_slot = value,
            }
        } else {
            // Key does not exist in lhs: insert the rhs value unchanged.
            lhs.insert(key, value);
        }
    }
}
fn load_path(loader: &mut EdenFsConfigLoader, path: &Path) -> Result<()> {
let content = String::from_utf8(std::fs::read(&path)?)?;
trace!(?content,?path, "Loading config");
loader.load(toml::from_str(&content)?);
Ok(())
}
fn load_system(loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> {
load_path(loader, &etc_dir.join("edenfs.rc"))
}
fn | (loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> {
let rcs_dir = etc_dir.join("config.d");
let entries = std::fs::read_dir(&rcs_dir)
.with_context(|| format!("Unable to read configuration from {:?}", rcs_dir))?;
for rc in entries {
let rc = match rc {
Ok(rc) => rc,
Err(e) => {
event!(
Level::INFO,
"Unable to read configuration, skipped: {:?}",
e
);
continue;
}
};
let name = rc.file_name();
let name = if let Some(name) = name.to_str() {
name
} else {
continue;
};
if name.starts_with('.') ||!name.ends_with(".toml") {
continue;
}
if let Err(e) = load_path(loader, &rc.path()) {
event!(
Level::DEBUG,
"Not able to load '{}': {:?}",
rc.path().display(),
e
);
}
}
Ok(())
}
fn load_user(loader: &mut EdenFsConfigLoader, home_dir: &Path) -> Result<()> {
let home_rc = home_dir.join(".edenrc");
load_path(loader, &home_rc)
}
/// Assemble the EdenFS configuration from its layers.
///
/// Loads, in order: the system `edenfs.rc`, the `config.d` drop-ins,
/// then the user's `.edenrc` when a home directory is known. Each layer
/// is best-effort: a failure is logged and skipped, never propagated.
/// NOTE(review): override semantics between layers come from the
/// `stack_config` derive — later loads presumably take precedence;
/// confirm against that crate's docs.
pub fn load_config(
    etc_eden_dir: &Path,
    home_dir: Option<&Path>,
) -> Result<EdenFsConfig, EdenFsError> {
    let mut loader = EdenFsConfig::loader();
    if let Err(e) = load_system(&mut loader, &etc_eden_dir) {
        event!(
            Level::INFO,
            etc_eden_dir =?etc_eden_dir,
            "Unable to load system configuration, skipped: {:?}",
            e
        );
    } else {
        event!(Level::DEBUG, "System configuration loaded");
    }
    if let Err(e) = load_system_rcs(&mut loader, &etc_eden_dir) {
        event!(
            Level::INFO,
            etc_eden_dir =?etc_eden_dir,
            "Unable to load system RC configurations, skipped: {:?}",
            e
        );
    } else {
        event!(Level::DEBUG, "System RC configurations loaded");
    }
    if let Some(home) = home_dir {
        if let Err(e) = load_user(&mut loader, &home) {
            event!(Level::INFO, home =?home, "Unable to load user configuration, skipped: {:?}", e);
        } else {
            event!(Level::DEBUG, "User configuration loaded");
        }
    } else {
        event!(
            Level::INFO,
            "Unable to find home dir. User configuration is not loaded."
        );
    }
    // Only a failure to assemble the final config is surfaced to callers.
    Ok(loader.build().map_err(EdenFsError::ConfigurationError)?)
}
| load_system_rcs | identifier_name |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::path::Path;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use stack_config::StackConfig;
use tracing::{event, trace, Level};
use edenfs_error::EdenFsError;
#[derive(Serialize, Deserialize, StackConfig, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Core {
#[stack(default)]
eden_directory: Option<String>,
}
#[derive(Serialize, Deserialize, StackConfig, Debug)]
pub struct EdenFsConfig {
#[stack(nested)]
core: Core,
#[stack(merge = "merge_table")]
#[serde(flatten)]
other: toml::value::Table,
}
/// Merge the TOML table `rhs` into `lhs`, recursing into nested tables.
///
/// When both sides hold a table under the same key the tables are merged
/// key-by-key; any other collision (non-table values, or mismatched
/// types) is resolved in favour of `rhs`. Keys only present in `rhs` are
/// inserted as-is.
fn merge_table(lhs: &mut toml::value::Table, rhs: toml::value::Table) {
    for (key, value) in rhs.into_iter() {
        if let Some(lhs_value) = lhs.get_mut(&key) {
            match (lhs_value, value) {
                // Both values are tables: merge them recursively.
                // Pattern matching replaces the previous
                // `is_table()` check plus `try_into().unwrap()`
                // round-trip, removing the panic path entirely.
                (toml::Value::Table(lhs_table), toml::Value::Table(rhs_table)) => {
                    merge_table(lhs_table, rhs_table)
                }
                // Otherwise prefer the right-hand side.
                (lhs_slot, value) => *lhs_slot = value,
            }
        } else {
            // Key does not exist in lhs: insert the rhs value unchanged.
            lhs.insert(key, value);
        }
    }
}
fn load_path(loader: &mut EdenFsConfigLoader, path: &Path) -> Result<()> {
let content = String::from_utf8(std::fs::read(&path)?)?;
trace!(?content,?path, "Loading config");
loader.load(toml::from_str(&content)?);
Ok(())
}
fn load_system(loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> {
load_path(loader, &etc_dir.join("edenfs.rc"))
}
fn load_system_rcs(loader: &mut EdenFsConfigLoader, etc_dir: &Path) -> Result<()> {
let rcs_dir = etc_dir.join("config.d");
let entries = std::fs::read_dir(&rcs_dir)
.with_context(|| format!("Unable to read configuration from {:?}", rcs_dir))?;
for rc in entries {
let rc = match rc {
Ok(rc) => rc,
Err(e) => { | continue;
}
};
let name = rc.file_name();
let name = if let Some(name) = name.to_str() {
name
} else {
continue;
};
if name.starts_with('.') ||!name.ends_with(".toml") {
continue;
}
if let Err(e) = load_path(loader, &rc.path()) {
event!(
Level::DEBUG,
"Not able to load '{}': {:?}",
rc.path().display(),
e
);
}
}
Ok(())
}
fn load_user(loader: &mut EdenFsConfigLoader, home_dir: &Path) -> Result<()> {
let home_rc = home_dir.join(".edenrc");
load_path(loader, &home_rc)
}
/// Assemble the EdenFS configuration from its layers.
///
/// Loads, in order: the system `edenfs.rc`, the `config.d` drop-ins,
/// then the user's `.edenrc` when a home directory is known. Each layer
/// is best-effort: a failure is logged and skipped, never propagated.
/// NOTE(review): override semantics between layers come from the
/// `stack_config` derive — later loads presumably take precedence;
/// confirm against that crate's docs.
pub fn load_config(
    etc_eden_dir: &Path,
    home_dir: Option<&Path>,
) -> Result<EdenFsConfig, EdenFsError> {
    let mut loader = EdenFsConfig::loader();
    if let Err(e) = load_system(&mut loader, &etc_eden_dir) {
        event!(
            Level::INFO,
            etc_eden_dir =?etc_eden_dir,
            "Unable to load system configuration, skipped: {:?}",
            e
        );
    } else {
        event!(Level::DEBUG, "System configuration loaded");
    }
    if let Err(e) = load_system_rcs(&mut loader, &etc_eden_dir) {
        event!(
            Level::INFO,
            etc_eden_dir =?etc_eden_dir,
            "Unable to load system RC configurations, skipped: {:?}",
            e
        );
    } else {
        event!(Level::DEBUG, "System RC configurations loaded");
    }
    if let Some(home) = home_dir {
        if let Err(e) = load_user(&mut loader, &home) {
            event!(Level::INFO, home =?home, "Unable to load user configuration, skipped: {:?}", e);
        } else {
            event!(Level::DEBUG, "User configuration loaded");
        }
    } else {
        event!(
            Level::INFO,
            "Unable to find home dir. User configuration is not loaded."
        );
    }
    // Only a failure to assemble the final config is surfaced to callers.
    Ok(loader.build().map_err(EdenFsError::ConfigurationError)?)
}
Level::INFO,
"Unable to read configuration, skipped: {:?}",
e
); | random_line_split |
sectors.rs | use rand::{seq, ChaChaRng, SeedableRng};
use rayon::prelude::*;
use std::{
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
time::Instant,
usize::MAX,
};
use config::GameConfig;
use entities::Faction;
use entities::Sector;
use utils::Point;
/// Used for generating sectors.
pub struct SectorGen {}
impl SectorGen {
    /// Create a new sector generator.
    ///
    /// `SectorGen` carries no state of its own; all inputs are passed to
    /// `generate`.
    pub fn new() -> SectorGen {
        SectorGen {}
    }
/// Split the systems in to a set number of clusters using K-means.
pub fn generate(&self, config: &GameConfig, system_locations: Vec<Point>) -> Vec<Sector> {
// Measure time for generation.
let now = Instant::now();
info!("Simulating expansion for initial sectors...");
let seed: &[_] = &[config.map_seed as u32];
let mut rng: ChaChaRng = ChaChaRng::from_seed(seed);
// Setup initial centroids
let mut centroids =
seq::sample_iter(&mut rng, system_locations.iter(), config.number_of_sectors)
.unwrap()
.into_iter() | .into_iter()
.map(|point| (point, 0))
.collect();
// Run K means until convergence, i.e until no reassignments
let mut has_assigned = true;
while has_assigned {
let wrapped_assigned = AtomicBool::new(false);
// Assign to closest centroid
cluster_map
.par_iter_mut()
.for_each(|(system_location, cluster_id)| {
let mut closest_cluster = *cluster_id;
let mut closest_distance = system_location.distance(¢roids[*cluster_id]);
for (i, centroid) in centroids.iter().enumerate() {
let distance = system_location.distance(centroid);
if distance < closest_distance {
wrapped_assigned.store(true, Ordering::Relaxed);
closest_cluster = i;
closest_distance = distance;
}
}
*cluster_id = closest_cluster;
});
has_assigned = wrapped_assigned.load(Ordering::Relaxed);
// Calculate new centroids
centroids
//.par_iter_mut()
.iter_mut()
.enumerate()
.for_each(|(id, centroid)| {
let mut count = 0.;
let mut new_centroid = Point::origin();
for (system_location, _) in cluster_map.iter().filter(|&(_, c_id)| *c_id == id)
{
new_centroid += *system_location;
count += 1.;
}
new_centroid *= 1. / count;
*centroid = new_centroid;
});
}
// Setup cluster vectors
let mut sector_vecs =
(0..config.number_of_sectors).fold(Vec::<Vec<Point>>::new(), |mut sectors, _| {
sectors.push(vec![]);
sectors
});
// Map systems to final cluster
for (system_location, id) in cluster_map {
sector_vecs[id].push(system_location);
}
// Create sector for each cluster
let sectors = sector_vecs
.into_iter()
.map(|system_locations| {
let sector_seed: &[_] = &[system_locations.len() as u32];
let mut faction_rng: ChaChaRng = SeedableRng::from_seed(sector_seed);
Sector {
system_locations,
faction: Faction::random_faction(&mut faction_rng),
}
})
.collect::<Vec<Sector>>();
info!(
"Mapped galaxy into {} sectors of {} systems, avg size: {},
max size {}, min size {}, taking {} ms \n
Sectors include: {} Cartel, {} Empire, {} Federation, {} Independent",
sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len()),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len())
/ sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc.max(sec.system_locations.len())),
sectors
.iter()
.fold(MAX, |acc, sec| acc.min(sec.system_locations.len())),
((now.elapsed().as_secs() * 1_000) + u64::from(now.elapsed().subsec_millis())),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Cartel => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Empire => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Federation => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Independent => 1,
_ => 0,
})
);
sectors
}
} | .cloned()
.collect::<Vec<_>>();
// System to cluster_id mapping
let mut cluster_map: HashMap<Point, usize> = system_locations | random_line_split |
sectors.rs | use rand::{seq, ChaChaRng, SeedableRng};
use rayon::prelude::*;
use std::{
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
time::Instant,
usize::MAX,
};
use config::GameConfig;
use entities::Faction;
use entities::Sector;
use utils::Point;
/// Used for generating sectors.
pub struct SectorGen {}
impl SectorGen {
/// Create a new sector generator.
pub fn new() -> SectorGen {
SectorGen {}
}
/// Split the systems in to a set number of clusters using K-means.
pub fn | (&self, config: &GameConfig, system_locations: Vec<Point>) -> Vec<Sector> {
// Measure time for generation.
let now = Instant::now();
info!("Simulating expansion for initial sectors...");
let seed: &[_] = &[config.map_seed as u32];
let mut rng: ChaChaRng = ChaChaRng::from_seed(seed);
// Setup initial centroids
let mut centroids =
seq::sample_iter(&mut rng, system_locations.iter(), config.number_of_sectors)
.unwrap()
.into_iter()
.cloned()
.collect::<Vec<_>>();
// System to cluster_id mapping
let mut cluster_map: HashMap<Point, usize> = system_locations
.into_iter()
.map(|point| (point, 0))
.collect();
// Run K means until convergence, i.e until no reassignments
let mut has_assigned = true;
while has_assigned {
let wrapped_assigned = AtomicBool::new(false);
// Assign to closest centroid
cluster_map
.par_iter_mut()
.for_each(|(system_location, cluster_id)| {
let mut closest_cluster = *cluster_id;
let mut closest_distance = system_location.distance(¢roids[*cluster_id]);
for (i, centroid) in centroids.iter().enumerate() {
let distance = system_location.distance(centroid);
if distance < closest_distance {
wrapped_assigned.store(true, Ordering::Relaxed);
closest_cluster = i;
closest_distance = distance;
}
}
*cluster_id = closest_cluster;
});
has_assigned = wrapped_assigned.load(Ordering::Relaxed);
// Calculate new centroids
centroids
//.par_iter_mut()
.iter_mut()
.enumerate()
.for_each(|(id, centroid)| {
let mut count = 0.;
let mut new_centroid = Point::origin();
for (system_location, _) in cluster_map.iter().filter(|&(_, c_id)| *c_id == id)
{
new_centroid += *system_location;
count += 1.;
}
new_centroid *= 1. / count;
*centroid = new_centroid;
});
}
// Setup cluster vectors
let mut sector_vecs =
(0..config.number_of_sectors).fold(Vec::<Vec<Point>>::new(), |mut sectors, _| {
sectors.push(vec![]);
sectors
});
// Map systems to final cluster
for (system_location, id) in cluster_map {
sector_vecs[id].push(system_location);
}
// Create sector for each cluster
let sectors = sector_vecs
.into_iter()
.map(|system_locations| {
let sector_seed: &[_] = &[system_locations.len() as u32];
let mut faction_rng: ChaChaRng = SeedableRng::from_seed(sector_seed);
Sector {
system_locations,
faction: Faction::random_faction(&mut faction_rng),
}
})
.collect::<Vec<Sector>>();
info!(
"Mapped galaxy into {} sectors of {} systems, avg size: {},
max size {}, min size {}, taking {} ms \n
Sectors include: {} Cartel, {} Empire, {} Federation, {} Independent",
sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len()),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len())
/ sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc.max(sec.system_locations.len())),
sectors
.iter()
.fold(MAX, |acc, sec| acc.min(sec.system_locations.len())),
((now.elapsed().as_secs() * 1_000) + u64::from(now.elapsed().subsec_millis())),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Cartel => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Empire => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Federation => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Independent => 1,
_ => 0,
})
);
sectors
}
}
| generate | identifier_name |
sectors.rs | use rand::{seq, ChaChaRng, SeedableRng};
use rayon::prelude::*;
use std::{
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
time::Instant,
usize::MAX,
};
use config::GameConfig;
use entities::Faction;
use entities::Sector;
use utils::Point;
/// Used for generating sectors.
pub struct SectorGen {}
impl SectorGen {
/// Create a new sector generator.
pub fn new() -> SectorGen |
/// Split the systems in to a set number of clusters using K-means.
pub fn generate(&self, config: &GameConfig, system_locations: Vec<Point>) -> Vec<Sector> {
// Measure time for generation.
let now = Instant::now();
info!("Simulating expansion for initial sectors...");
let seed: &[_] = &[config.map_seed as u32];
let mut rng: ChaChaRng = ChaChaRng::from_seed(seed);
// Setup initial centroids
let mut centroids =
seq::sample_iter(&mut rng, system_locations.iter(), config.number_of_sectors)
.unwrap()
.into_iter()
.cloned()
.collect::<Vec<_>>();
// System to cluster_id mapping
let mut cluster_map: HashMap<Point, usize> = system_locations
.into_iter()
.map(|point| (point, 0))
.collect();
// Run K means until convergence, i.e until no reassignments
let mut has_assigned = true;
while has_assigned {
let wrapped_assigned = AtomicBool::new(false);
// Assign to closest centroid
cluster_map
.par_iter_mut()
.for_each(|(system_location, cluster_id)| {
let mut closest_cluster = *cluster_id;
let mut closest_distance = system_location.distance(¢roids[*cluster_id]);
for (i, centroid) in centroids.iter().enumerate() {
let distance = system_location.distance(centroid);
if distance < closest_distance {
wrapped_assigned.store(true, Ordering::Relaxed);
closest_cluster = i;
closest_distance = distance;
}
}
*cluster_id = closest_cluster;
});
has_assigned = wrapped_assigned.load(Ordering::Relaxed);
// Calculate new centroids
centroids
//.par_iter_mut()
.iter_mut()
.enumerate()
.for_each(|(id, centroid)| {
let mut count = 0.;
let mut new_centroid = Point::origin();
for (system_location, _) in cluster_map.iter().filter(|&(_, c_id)| *c_id == id)
{
new_centroid += *system_location;
count += 1.;
}
new_centroid *= 1. / count;
*centroid = new_centroid;
});
}
// Setup cluster vectors
let mut sector_vecs =
(0..config.number_of_sectors).fold(Vec::<Vec<Point>>::new(), |mut sectors, _| {
sectors.push(vec![]);
sectors
});
// Map systems to final cluster
for (system_location, id) in cluster_map {
sector_vecs[id].push(system_location);
}
// Create sector for each cluster
let sectors = sector_vecs
.into_iter()
.map(|system_locations| {
let sector_seed: &[_] = &[system_locations.len() as u32];
let mut faction_rng: ChaChaRng = SeedableRng::from_seed(sector_seed);
Sector {
system_locations,
faction: Faction::random_faction(&mut faction_rng),
}
})
.collect::<Vec<Sector>>();
info!(
"Mapped galaxy into {} sectors of {} systems, avg size: {},
max size {}, min size {}, taking {} ms \n
Sectors include: {} Cartel, {} Empire, {} Federation, {} Independent",
sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len()),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len())
/ sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc.max(sec.system_locations.len())),
sectors
.iter()
.fold(MAX, |acc, sec| acc.min(sec.system_locations.len())),
((now.elapsed().as_secs() * 1_000) + u64::from(now.elapsed().subsec_millis())),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Cartel => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Empire => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Federation => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Independent => 1,
_ => 0,
})
);
sectors
}
}
| {
SectorGen {}
} | identifier_body |
sectors.rs | use rand::{seq, ChaChaRng, SeedableRng};
use rayon::prelude::*;
use std::{
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
time::Instant,
usize::MAX,
};
use config::GameConfig;
use entities::Faction;
use entities::Sector;
use utils::Point;
/// Used for generating sectors.
pub struct SectorGen {}
impl SectorGen {
/// Create a new sector generator.
pub fn new() -> SectorGen {
SectorGen {}
}
/// Split the systems in to a set number of clusters using K-means.
pub fn generate(&self, config: &GameConfig, system_locations: Vec<Point>) -> Vec<Sector> {
// Measure time for generation.
let now = Instant::now();
info!("Simulating expansion for initial sectors...");
let seed: &[_] = &[config.map_seed as u32];
let mut rng: ChaChaRng = ChaChaRng::from_seed(seed);
// Setup initial centroids
let mut centroids =
seq::sample_iter(&mut rng, system_locations.iter(), config.number_of_sectors)
.unwrap()
.into_iter()
.cloned()
.collect::<Vec<_>>();
// System to cluster_id mapping
let mut cluster_map: HashMap<Point, usize> = system_locations
.into_iter()
.map(|point| (point, 0))
.collect();
// Run K means until convergence, i.e until no reassignments
let mut has_assigned = true;
while has_assigned {
let wrapped_assigned = AtomicBool::new(false);
// Assign to closest centroid
cluster_map
.par_iter_mut()
.for_each(|(system_location, cluster_id)| {
let mut closest_cluster = *cluster_id;
let mut closest_distance = system_location.distance(¢roids[*cluster_id]);
for (i, centroid) in centroids.iter().enumerate() {
let distance = system_location.distance(centroid);
if distance < closest_distance |
}
*cluster_id = closest_cluster;
});
has_assigned = wrapped_assigned.load(Ordering::Relaxed);
// Calculate new centroids
centroids
//.par_iter_mut()
.iter_mut()
.enumerate()
.for_each(|(id, centroid)| {
let mut count = 0.;
let mut new_centroid = Point::origin();
for (system_location, _) in cluster_map.iter().filter(|&(_, c_id)| *c_id == id)
{
new_centroid += *system_location;
count += 1.;
}
new_centroid *= 1. / count;
*centroid = new_centroid;
});
}
// Setup cluster vectors
let mut sector_vecs =
(0..config.number_of_sectors).fold(Vec::<Vec<Point>>::new(), |mut sectors, _| {
sectors.push(vec![]);
sectors
});
// Map systems to final cluster
for (system_location, id) in cluster_map {
sector_vecs[id].push(system_location);
}
// Create sector for each cluster
let sectors = sector_vecs
.into_iter()
.map(|system_locations| {
let sector_seed: &[_] = &[system_locations.len() as u32];
let mut faction_rng: ChaChaRng = SeedableRng::from_seed(sector_seed);
Sector {
system_locations,
faction: Faction::random_faction(&mut faction_rng),
}
})
.collect::<Vec<Sector>>();
info!(
"Mapped galaxy into {} sectors of {} systems, avg size: {},
max size {}, min size {}, taking {} ms \n
Sectors include: {} Cartel, {} Empire, {} Federation, {} Independent",
sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len()),
sectors
.iter()
.fold(0, |acc, sec| acc + sec.system_locations.len())
/ sectors.len(),
sectors
.iter()
.fold(0, |acc, sec| acc.max(sec.system_locations.len())),
sectors
.iter()
.fold(MAX, |acc, sec| acc.min(sec.system_locations.len())),
((now.elapsed().as_secs() * 1_000) + u64::from(now.elapsed().subsec_millis())),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Cartel => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Empire => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Federation => 1,
_ => 0,
}),
sectors.iter().fold(0, |acc, sec| acc
+ match sec.faction {
Faction::Independent => 1,
_ => 0,
})
);
sectors
}
}
| {
wrapped_assigned.store(true, Ordering::Relaxed);
closest_cluster = i;
closest_distance = distance;
} | conditional_block |
custom_build.rs | use std::collections::{HashMap, BTreeSet};
use std::fs;
use std::io::prelude::*;
use std::path::PathBuf;
use std::str;
use std::sync::{Mutex, Arc};
use core::{PackageId, PackageSet};
use util::{CargoResult, human, Human};
use util::{internal, ChainError, profile, paths};
use util::Freshness;
use super::job::Work;
use super::{fingerprint, process, Kind, Context, Unit};
use super::CommandType;
/// Contains the parsed output of a custom build script.
#[derive(Clone, Debug, Hash)]
pub struct BuildOutput {
/// Paths to pass to rustc with the `-L` flag
pub library_paths: Vec<PathBuf>,
/// Names and link kinds of libraries, suitable for the `-l` flag
pub library_links: Vec<String>,
/// Various `--cfg` flags to pass to the compiler
pub cfgs: Vec<String>,
/// Metadata to pass to the immediate dependencies
pub metadata: Vec<(String, String)>,
}
pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
pub struct BuildState {
pub outputs: Mutex<BuildMap>,
}
#[derive(Default)]
pub struct BuildScripts {
pub to_link: BTreeSet<(PackageId, Kind)>,
pub plugins: BTreeSet<PackageId>,
}
/// Prepares a `Work` that executes the target as a custom build script.
///
/// The `req` given is the requirement which this run of the build script will
/// prepare work for. If the requirement is specified as both the target and the
/// host platforms it is assumed that the two are equal and the build script is
/// only run once (not twice).
pub fn prepare(cx: &mut Context, unit: &Unit)
-> CargoResult<(Work, Work, Freshness)> {
let _p = profile::start(format!("build script prepare: {}/{}",
unit.pkg, unit.target.name()));
let key = (unit.pkg.package_id().clone(), unit.kind);
let overridden = cx.build_state.outputs.lock().unwrap().contains_key(&key);
let (work_dirty, work_fresh) = if overridden {
(Work::new(|_| Ok(())), Work::new(|_| Ok(())))
} else {
try!(build_work(cx, unit))
};
// Now that we've prep'd our work, build the work needed to manage the
// fingerprint and then start returning that upwards.
let (freshness, dirty, fresh) =
try!(fingerprint::prepare_build_cmd(cx, unit));
Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
}
fn build_work(cx: &mut Context, unit: &Unit) -> CargoResult<(Work, Work)> {
let (script_output, build_output) = {
(cx.layout(unit.pkg, Kind::Host).build(unit.pkg),
cx.layout(unit.pkg, unit.kind).build_out(unit.pkg))
};
// Building the command to execute
let to_exec = script_output.join(unit.target.name());
// Start preparing the process to execute, starting out with some
// environment variables. Note that the profile-related environment
// variables are not set with this the build script's profile but rather the
// package's library profile.
let profile = cx.lib_profile(unit.pkg.package_id());
let to_exec = to_exec.into_os_string();
let mut p = try!(super::process(CommandType::Host(to_exec), unit.pkg, cx));
p.env("OUT_DIR", &build_output)
.env("CARGO_MANIFEST_DIR", unit.pkg.root())
.env("NUM_JOBS", &cx.jobs().to_string())
.env("TARGET", &match unit.kind {
Kind::Host => &cx.config.rustc_info().host[..],
Kind::Target => cx.target_triple(),
})
.env("DEBUG", &profile.debuginfo.to_string())
.env("OPT_LEVEL", &profile.opt_level.to_string())
.env("PROFILE", if cx.build_config.release {"release"} else {"debug"})
.env("HOST", &cx.config.rustc_info().host);
// Be sure to pass along all enabled features for this package, this is the
// last piece of statically known information that we have.
if let Some(features) = cx.resolve.features(unit.pkg.package_id()) {
for feat in features.iter() {
p.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
}
}
// Gather the set of native dependencies that this package has along with
// some other variables to close over.
//
// This information will be used at build-time later on to figure out which
// sorts of variables need to be discovered at that time.
let lib_deps = {
cx.dep_run_custom_build(unit).iter().filter_map(|unit| {
if unit.profile.run_custom_build {
Some((unit.pkg.manifest().links().unwrap().to_string(),
unit.pkg.package_id().clone()))
} else {
None
}
}).collect::<Vec<_>>()
};
let pkg_name = unit.pkg.to_string();
let build_state = cx.build_state.clone();
let id = unit.pkg.package_id().clone();
let all = (id.clone(), pkg_name.clone(), build_state.clone(),
build_output.clone());
let build_scripts = super::load_build_deps(cx, unit);
let kind = unit.kind;
try!(fs::create_dir_all(&cx.layout(unit.pkg, Kind::Host).build(unit.pkg)));
try!(fs::create_dir_all(&cx.layout(unit.pkg, unit.kind).build(unit.pkg)));
let exec_engine = cx.exec_engine.clone();
// Prepare the unit of "dirty work" which will actually run the custom build
// command.
//
// Note that this has to do some extra work just before running the command
// to determine extra environment variables and such.
let dirty = Work::new(move |desc_tx| {
// Make sure that OUT_DIR exists.
//
// If we have an old build directory, then just move it into place,
// otherwise create it!
if fs::metadata(&build_output).is_err() {
try!(fs::create_dir(&build_output).chain_error(|| {
internal("failed to create script output directory for \
build command")
}));
}
// For all our native lib dependencies, pick up their metadata to pass
// along to this custom build command. We're also careful to augment our
// dynamic library search path in case the build script depended on any
// native dynamic libraries.
{
let build_state = build_state.outputs.lock().unwrap();
for (name, id) in lib_deps {
let key = (id.clone(), kind);
let state = try!(build_state.get(&key).chain_error(|| {
internal(format!("failed to locate build state for env \
vars: {}/{:?}", id, kind))
}));
let data = &state.metadata;
for &(ref key, ref value) in data.iter() {
p.env(&format!("DEP_{}_{}", super::envify(&name),
super::envify(key)), value);
}
}
if let Some(build_scripts) = build_scripts {
try!(super::add_plugin_deps(&mut p, &build_state,
&build_scripts));
}
}
// And now finally, run the build command itself!
desc_tx.send(p.to_string()).ok();
let output = try!(exec_engine.exec_with_output(p).map_err(|mut e| {
e.desc = format!("failed to run custom build command for `{}`\n{}",
pkg_name, e.desc);
Human(e)
}));
try!(paths::write(&build_output.parent().unwrap().join("output"),
&output.stdout));
// After the build command has finished running, we need to be sure to
// remember all of its output so we can later discover precisely what it
// was, even if we don't run the build command again (due to freshness).
//
// This is also the location where we provide feedback into the build
// state informing what variables were discovered via our script as
// well.
let output = try!(str::from_utf8(&output.stdout).map_err(|_| {
human("build script output was not valid utf-8")
}));
let parsed_output = try!(BuildOutput::parse(output, &pkg_name));
build_state.insert(id, kind, parsed_output);
Ok(())
});
// Now that we've prepared our work-to-do, we need to prepare the fresh work
// itself to run when we actually end up just discarding what we calculated
// above.
let fresh = Work::new(move |_tx| {
let (id, pkg_name, build_state, build_output) = all;
let contents = try!(paths::read(&build_output.parent().unwrap()
.join("output")));
let output = try!(BuildOutput::parse(&contents, &pkg_name));
build_state.insert(id, kind, output);
Ok(())
});
Ok((dirty, fresh))
}
impl BuildState {
pub fn new(config: &super::BuildConfig,
packages: &PackageSet) -> BuildState {
let mut sources = HashMap::new();
for package in packages.iter() {
match package.manifest().links() {
Some(links) => {
sources.insert(links.to_string(),
package.package_id().clone());
}
None => {}
}
}
let mut outputs = HashMap::new();
let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
for ((name, output), kind) in i1.chain(i2) {
// If no package is using the library named `name`, then this is
// just an override that we ignore.
if let Some(id) = sources.get(name) {
outputs.insert((id.clone(), kind), output.clone());
}
}
BuildState { outputs: Mutex::new(outputs) }
}
fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
self.outputs.lock().unwrap().insert((id, kind), output);
}
}
impl BuildOutput {
// Parses the output of a script.
// The `pkg_name` is used for error messages.
pub fn parse(input: &str, pkg_name: &str) -> CargoResult<BuildOutput> {
let mut library_paths = Vec::new();
let mut library_links = Vec::new();
let mut cfgs = Vec::new();
let mut metadata = Vec::new();
let whence = format!("build script of `{}`", pkg_name);
for line in input.lines() {
let mut iter = line.splitn(2, ':');
if iter.next()!= Some("cargo") {
// skip this line since it doesn't start with "cargo:"
continue;
}
let data = match iter.next() {
Some(val) => val,
None => continue
};
// getting the `key=value` part of the line
let mut iter = data.splitn(2, '=');
let key = iter.next();
let value = iter.next();
let (key, value) = match (key, value) {
(Some(a), Some(b)) => (a, b.trim_right()),
// line started with `cargo:` but didn't match `key=value`
_ => bail!("Wrong output in {}: `{}`", whence, line),
};
match key {
"rustc-flags" => {
let (libs, links) = try!(
BuildOutput::parse_rustc_flags(value, &whence)
);
library_links.extend(links.into_iter());
library_paths.extend(libs.into_iter());
}
"rustc-link-lib" => library_links.push(value.to_string()),
"rustc-link-search" => library_paths.push(PathBuf::from(value)),
"rustc-cfg" => cfgs.push(value.to_string()),
_ => metadata.push((key.to_string(), value.to_string())),
}
}
Ok(BuildOutput {
library_paths: library_paths,
library_links: library_links,
cfgs: cfgs,
metadata: metadata,
})
}
pub fn parse_rustc_flags(value: &str, whence: &str)
-> CargoResult<(Vec<PathBuf>, Vec<String>)> {
let value = value.trim();
let mut flags_iter = value.split(|c: char| c.is_whitespace())
.filter(|w| w.chars().any(|c|!c.is_whitespace()));
let (mut library_links, mut library_paths) = (Vec::new(), Vec::new());
loop {
let flag = match flags_iter.next() {
Some(f) => f,
None => break
};
if flag!= "-l" && flag!= "-L" {
bail!("Only `-l` and `-L` flags are allowed in {}: `{}`",
whence, value)
}
let value = match flags_iter.next() {
Some(v) => v,
None => bail!("Flag in rustc-flags has no value in {}: `{}`",
whence, value)
};
match flag {
"-l" => library_links.push(value.to_string()),
"-L" => library_paths.push(PathBuf::from(value)),
// was already checked above
_ => bail!("only -l and -L flags are allowed")
};
}
Ok((library_paths, library_links))
}
}
/// Compute the `build_scripts` map in the `Context` which tracks what build
/// scripts each package depends on.
///
/// The global `build_scripts` map lists for all (package, kind) tuples what set
/// of packages' build script outputs must be considered. For example this lists
/// all dependencies' `-L` flags which need to be propagated transitively.
///
/// The given set of targets to this function is the initial set of
/// targets/profiles which are being built.
pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>,
units: &[Unit<'b>]) {
let mut ret = HashMap::new();
for unit in units {
build(&mut ret, cx, unit);
}
cx.build_scripts.extend(ret.into_iter().map(|(k, v)| {
(k, Arc::new(v))
}));
// Recursive function to build up the map we're constructing. This function
// memoizes all of its return values as it goes along.
fn build<'a, 'b, 'cfg>(out: &'a mut HashMap<Unit<'b>, BuildScripts>,
cx: &Context<'b, 'cfg>,
unit: &Unit<'b>)
-> &'a BuildScripts | to_link.extend(dep_scripts.to_link.iter().cloned());
}
}
let prev = out.entry(*unit).or_insert(BuildScripts::default());
prev.to_link.extend(to_link);
prev.plugins.extend(plugins);
return prev
}
}
| {
// Do a quick pre-flight check to see if we've already calculated the
// set of dependencies.
if out.contains_key(unit) {
return &out[unit]
}
let mut to_link = BTreeSet::new();
let mut plugins = BTreeSet::new();
if !unit.target.is_custom_build() && unit.pkg.has_custom_build() {
to_link.insert((unit.pkg.package_id().clone(), unit.kind));
}
for unit in cx.dep_targets(unit).iter() {
let dep_scripts = build(out, cx, unit);
if unit.target.for_host() {
plugins.extend(dep_scripts.to_link.iter()
.map(|p| &p.0).cloned());
} else if unit.target.linkable() { | identifier_body |
custom_build.rs | use std::collections::{HashMap, BTreeSet};
use std::fs;
use std::io::prelude::*;
use std::path::PathBuf;
use std::str;
use std::sync::{Mutex, Arc};
use core::{PackageId, PackageSet};
use util::{CargoResult, human, Human};
use util::{internal, ChainError, profile, paths};
use util::Freshness;
use super::job::Work;
use super::{fingerprint, process, Kind, Context, Unit};
use super::CommandType;
/// Contains the parsed output of a custom build script.
#[derive(Clone, Debug, Hash)]
pub struct BuildOutput {
/// Paths to pass to rustc with the `-L` flag
pub library_paths: Vec<PathBuf>,
/// Names and link kinds of libraries, suitable for the `-l` flag
pub library_links: Vec<String>,
/// Various `--cfg` flags to pass to the compiler
pub cfgs: Vec<String>,
/// Metadata to pass to the immediate dependencies
pub metadata: Vec<(String, String)>,
}
pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
pub struct BuildState {
pub outputs: Mutex<BuildMap>,
}
#[derive(Default)]
pub struct BuildScripts {
pub to_link: BTreeSet<(PackageId, Kind)>,
pub plugins: BTreeSet<PackageId>,
}
/// Prepares a `Work` that executes the target as a custom build script.
///
/// The `req` given is the requirement which this run of the build script will
/// prepare work for. If the requirement is specified as both the target and the
/// host platforms it is assumed that the two are equal and the build script is
/// only run once (not twice).
pub fn prepare(cx: &mut Context, unit: &Unit)
-> CargoResult<(Work, Work, Freshness)> {
let _p = profile::start(format!("build script prepare: {}/{}",
unit.pkg, unit.target.name()));
let key = (unit.pkg.package_id().clone(), unit.kind);
let overridden = cx.build_state.outputs.lock().unwrap().contains_key(&key);
let (work_dirty, work_fresh) = if overridden {
(Work::new(|_| Ok(())), Work::new(|_| Ok(())))
} else {
try!(build_work(cx, unit))
};
// Now that we've prep'd our work, build the work needed to manage the
// fingerprint and then start returning that upwards.
let (freshness, dirty, fresh) =
try!(fingerprint::prepare_build_cmd(cx, unit));
Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
}
fn build_work(cx: &mut Context, unit: &Unit) -> CargoResult<(Work, Work)> {
let (script_output, build_output) = {
(cx.layout(unit.pkg, Kind::Host).build(unit.pkg),
cx.layout(unit.pkg, unit.kind).build_out(unit.pkg))
};
// Building the command to execute
let to_exec = script_output.join(unit.target.name());
// Start preparing the process to execute, starting out with some
// environment variables. Note that the profile-related environment
// variables are not set with this the build script's profile but rather the
// package's library profile.
let profile = cx.lib_profile(unit.pkg.package_id());
let to_exec = to_exec.into_os_string();
let mut p = try!(super::process(CommandType::Host(to_exec), unit.pkg, cx));
p.env("OUT_DIR", &build_output)
.env("CARGO_MANIFEST_DIR", unit.pkg.root())
.env("NUM_JOBS", &cx.jobs().to_string())
.env("TARGET", &match unit.kind {
Kind::Host => &cx.config.rustc_info().host[..],
Kind::Target => cx.target_triple(),
})
.env("DEBUG", &profile.debuginfo.to_string())
.env("OPT_LEVEL", &profile.opt_level.to_string())
.env("PROFILE", if cx.build_config.release {"release"} else {"debug"})
.env("HOST", &cx.config.rustc_info().host);
// Be sure to pass along all enabled features for this package, this is the
// last piece of statically known information that we have.
if let Some(features) = cx.resolve.features(unit.pkg.package_id()) {
for feat in features.iter() {
p.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
}
}
// Gather the set of native dependencies that this package has along with
// some other variables to close over.
//
// This information will be used at build-time later on to figure out which
// sorts of variables need to be discovered at that time.
let lib_deps = {
cx.dep_run_custom_build(unit).iter().filter_map(|unit| {
if unit.profile.run_custom_build {
Some((unit.pkg.manifest().links().unwrap().to_string(),
unit.pkg.package_id().clone()))
} else {
None
}
}).collect::<Vec<_>>()
};
let pkg_name = unit.pkg.to_string();
let build_state = cx.build_state.clone();
let id = unit.pkg.package_id().clone();
let all = (id.clone(), pkg_name.clone(), build_state.clone(),
build_output.clone());
let build_scripts = super::load_build_deps(cx, unit);
let kind = unit.kind;
try!(fs::create_dir_all(&cx.layout(unit.pkg, Kind::Host).build(unit.pkg)));
try!(fs::create_dir_all(&cx.layout(unit.pkg, unit.kind).build(unit.pkg)));
let exec_engine = cx.exec_engine.clone();
// Prepare the unit of "dirty work" which will actually run the custom build
// command.
//
// Note that this has to do some extra work just before running the command
// to determine extra environment variables and such.
let dirty = Work::new(move |desc_tx| {
// Make sure that OUT_DIR exists.
//
// If we have an old build directory, then just move it into place,
// otherwise create it!
if fs::metadata(&build_output).is_err() {
try!(fs::create_dir(&build_output).chain_error(|| {
internal("failed to create script output directory for \
build command")
}));
}
// For all our native lib dependencies, pick up their metadata to pass
// along to this custom build command. We're also careful to augment our
// dynamic library search path in case the build script depended on any
// native dynamic libraries.
{
let build_state = build_state.outputs.lock().unwrap();
for (name, id) in lib_deps {
let key = (id.clone(), kind);
let state = try!(build_state.get(&key).chain_error(|| {
internal(format!("failed to locate build state for env \
vars: {}/{:?}", id, kind))
}));
let data = &state.metadata;
for &(ref key, ref value) in data.iter() {
p.env(&format!("DEP_{}_{}", super::envify(&name),
super::envify(key)), value);
}
}
if let Some(build_scripts) = build_scripts {
try!(super::add_plugin_deps(&mut p, &build_state,
&build_scripts));
}
}
// And now finally, run the build command itself!
desc_tx.send(p.to_string()).ok();
let output = try!(exec_engine.exec_with_output(p).map_err(|mut e| {
e.desc = format!("failed to run custom build command for `{}`\n{}",
pkg_name, e.desc);
Human(e)
}));
try!(paths::write(&build_output.parent().unwrap().join("output"),
&output.stdout));
// After the build command has finished running, we need to be sure to
// remember all of its output so we can later discover precisely what it
// was, even if we don't run the build command again (due to freshness).
//
// This is also the location where we provide feedback into the build
// state informing what variables were discovered via our script as
// well.
let output = try!(str::from_utf8(&output.stdout).map_err(|_| {
human("build script output was not valid utf-8")
}));
let parsed_output = try!(BuildOutput::parse(output, &pkg_name));
build_state.insert(id, kind, parsed_output);
Ok(())
});
// Now that we've prepared our work-to-do, we need to prepare the fresh work
// itself to run when we actually end up just discarding what we calculated
// above.
let fresh = Work::new(move |_tx| {
let (id, pkg_name, build_state, build_output) = all;
let contents = try!(paths::read(&build_output.parent().unwrap()
.join("output")));
let output = try!(BuildOutput::parse(&contents, &pkg_name));
build_state.insert(id, kind, output);
Ok(())
});
Ok((dirty, fresh))
}
impl BuildState {
pub fn new(config: &super::BuildConfig,
packages: &PackageSet) -> BuildState {
let mut sources = HashMap::new();
for package in packages.iter() {
match package.manifest().links() {
Some(links) => {
sources.insert(links.to_string(),
package.package_id().clone());
}
None => {}
}
}
let mut outputs = HashMap::new();
let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
for ((name, output), kind) in i1.chain(i2) {
// If no package is using the library named `name`, then this is
// just an override that we ignore.
if let Some(id) = sources.get(name) {
outputs.insert((id.clone(), kind), output.clone());
}
}
BuildState { outputs: Mutex::new(outputs) }
}
fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
self.outputs.lock().unwrap().insert((id, kind), output);
}
}
impl BuildOutput {
// Parses the output of a script.
// The `pkg_name` is used for error messages.
pub fn parse(input: &str, pkg_name: &str) -> CargoResult<BuildOutput> {
let mut library_paths = Vec::new();
let mut library_links = Vec::new();
let mut cfgs = Vec::new();
let mut metadata = Vec::new();
let whence = format!("build script of `{}`", pkg_name);
for line in input.lines() {
let mut iter = line.splitn(2, ':');
if iter.next()!= Some("cargo") |
let data = match iter.next() {
Some(val) => val,
None => continue
};
// getting the `key=value` part of the line
let mut iter = data.splitn(2, '=');
let key = iter.next();
let value = iter.next();
let (key, value) = match (key, value) {
(Some(a), Some(b)) => (a, b.trim_right()),
// line started with `cargo:` but didn't match `key=value`
_ => bail!("Wrong output in {}: `{}`", whence, line),
};
match key {
"rustc-flags" => {
let (libs, links) = try!(
BuildOutput::parse_rustc_flags(value, &whence)
);
library_links.extend(links.into_iter());
library_paths.extend(libs.into_iter());
}
"rustc-link-lib" => library_links.push(value.to_string()),
"rustc-link-search" => library_paths.push(PathBuf::from(value)),
"rustc-cfg" => cfgs.push(value.to_string()),
_ => metadata.push((key.to_string(), value.to_string())),
}
}
Ok(BuildOutput {
library_paths: library_paths,
library_links: library_links,
cfgs: cfgs,
metadata: metadata,
})
}
pub fn parse_rustc_flags(value: &str, whence: &str)
-> CargoResult<(Vec<PathBuf>, Vec<String>)> {
let value = value.trim();
let mut flags_iter = value.split(|c: char| c.is_whitespace())
.filter(|w| w.chars().any(|c|!c.is_whitespace()));
let (mut library_links, mut library_paths) = (Vec::new(), Vec::new());
loop {
let flag = match flags_iter.next() {
Some(f) => f,
None => break
};
if flag!= "-l" && flag!= "-L" {
bail!("Only `-l` and `-L` flags are allowed in {}: `{}`",
whence, value)
}
let value = match flags_iter.next() {
Some(v) => v,
None => bail!("Flag in rustc-flags has no value in {}: `{}`",
whence, value)
};
match flag {
"-l" => library_links.push(value.to_string()),
"-L" => library_paths.push(PathBuf::from(value)),
// was already checked above
_ => bail!("only -l and -L flags are allowed")
};
}
Ok((library_paths, library_links))
}
}
/// Compute the `build_scripts` map in the `Context` which tracks what build
/// scripts each package depends on.
///
/// The global `build_scripts` map lists for all (package, kind) tuples what set
/// of packages' build script outputs must be considered. For example this lists
/// all dependencies' `-L` flags which need to be propagated transitively.
///
/// The given set of targets to this function is the initial set of
/// targets/profiles which are being built.
pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>,
units: &[Unit<'b>]) {
let mut ret = HashMap::new();
for unit in units {
build(&mut ret, cx, unit);
}
cx.build_scripts.extend(ret.into_iter().map(|(k, v)| {
(k, Arc::new(v))
}));
// Recursive function to build up the map we're constructing. This function
// memoizes all of its return values as it goes along.
fn build<'a, 'b, 'cfg>(out: &'a mut HashMap<Unit<'b>, BuildScripts>,
cx: &Context<'b, 'cfg>,
unit: &Unit<'b>)
-> &'a BuildScripts {
// Do a quick pre-flight check to see if we've already calculated the
// set of dependencies.
if out.contains_key(unit) {
return &out[unit]
}
let mut to_link = BTreeSet::new();
let mut plugins = BTreeSet::new();
if!unit.target.is_custom_build() && unit.pkg.has_custom_build() {
to_link.insert((unit.pkg.package_id().clone(), unit.kind));
}
for unit in cx.dep_targets(unit).iter() {
let dep_scripts = build(out, cx, unit);
if unit.target.for_host() {
plugins.extend(dep_scripts.to_link.iter()
.map(|p| &p.0).cloned());
} else if unit.target.linkable() {
to_link.extend(dep_scripts.to_link.iter().cloned());
}
}
let prev = out.entry(*unit).or_insert(BuildScripts::default());
prev.to_link.extend(to_link);
prev.plugins.extend(plugins);
return prev
}
}
| {
// skip this line since it doesn't start with "cargo:"
continue;
} | conditional_block |
custom_build.rs | use std::collections::{HashMap, BTreeSet};
use std::fs;
use std::io::prelude::*;
use std::path::PathBuf;
use std::str;
use std::sync::{Mutex, Arc};
use core::{PackageId, PackageSet};
use util::{CargoResult, human, Human};
use util::{internal, ChainError, profile, paths};
use util::Freshness;
use super::job::Work;
use super::{fingerprint, process, Kind, Context, Unit};
use super::CommandType;
/// Contains the parsed output of a custom build script.
#[derive(Clone, Debug, Hash)]
pub struct BuildOutput {
/// Paths to pass to rustc with the `-L` flag
pub library_paths: Vec<PathBuf>,
/// Names and link kinds of libraries, suitable for the `-l` flag
pub library_links: Vec<String>,
/// Various `--cfg` flags to pass to the compiler
pub cfgs: Vec<String>,
/// Metadata to pass to the immediate dependencies
pub metadata: Vec<(String, String)>,
}
pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
pub struct BuildState {
pub outputs: Mutex<BuildMap>,
}
#[derive(Default)]
pub struct BuildScripts {
pub to_link: BTreeSet<(PackageId, Kind)>,
pub plugins: BTreeSet<PackageId>,
}
/// Prepares a `Work` that executes the target as a custom build script.
///
/// The `req` given is the requirement which this run of the build script will
/// prepare work for. If the requirement is specified as both the target and the
/// host platforms it is assumed that the two are equal and the build script is
/// only run once (not twice).
pub fn prepare(cx: &mut Context, unit: &Unit)
-> CargoResult<(Work, Work, Freshness)> {
let _p = profile::start(format!("build script prepare: {}/{}",
unit.pkg, unit.target.name()));
let key = (unit.pkg.package_id().clone(), unit.kind);
let overridden = cx.build_state.outputs.lock().unwrap().contains_key(&key);
let (work_dirty, work_fresh) = if overridden {
(Work::new(|_| Ok(())), Work::new(|_| Ok(())))
} else {
try!(build_work(cx, unit))
};
// Now that we've prep'd our work, build the work needed to manage the
// fingerprint and then start returning that upwards.
let (freshness, dirty, fresh) =
try!(fingerprint::prepare_build_cmd(cx, unit));
Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
}
fn build_work(cx: &mut Context, unit: &Unit) -> CargoResult<(Work, Work)> {
let (script_output, build_output) = {
(cx.layout(unit.pkg, Kind::Host).build(unit.pkg),
cx.layout(unit.pkg, unit.kind).build_out(unit.pkg))
};
// Building the command to execute
let to_exec = script_output.join(unit.target.name());
// Start preparing the process to execute, starting out with some
// environment variables. Note that the profile-related environment
// variables are not set with this the build script's profile but rather the
// package's library profile.
let profile = cx.lib_profile(unit.pkg.package_id());
let to_exec = to_exec.into_os_string();
let mut p = try!(super::process(CommandType::Host(to_exec), unit.pkg, cx));
p.env("OUT_DIR", &build_output)
.env("CARGO_MANIFEST_DIR", unit.pkg.root())
.env("NUM_JOBS", &cx.jobs().to_string())
.env("TARGET", &match unit.kind {
Kind::Host => &cx.config.rustc_info().host[..],
Kind::Target => cx.target_triple(),
})
.env("DEBUG", &profile.debuginfo.to_string())
.env("OPT_LEVEL", &profile.opt_level.to_string())
.env("PROFILE", if cx.build_config.release {"release"} else {"debug"})
.env("HOST", &cx.config.rustc_info().host);
// Be sure to pass along all enabled features for this package, this is the
// last piece of statically known information that we have.
if let Some(features) = cx.resolve.features(unit.pkg.package_id()) {
for feat in features.iter() {
p.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
}
}
// Gather the set of native dependencies that this package has along with
// some other variables to close over.
//
// This information will be used at build-time later on to figure out which
// sorts of variables need to be discovered at that time.
let lib_deps = {
cx.dep_run_custom_build(unit).iter().filter_map(|unit| {
if unit.profile.run_custom_build {
Some((unit.pkg.manifest().links().unwrap().to_string(),
unit.pkg.package_id().clone()))
} else {
None
}
}).collect::<Vec<_>>()
};
let pkg_name = unit.pkg.to_string();
let build_state = cx.build_state.clone();
let id = unit.pkg.package_id().clone();
let all = (id.clone(), pkg_name.clone(), build_state.clone(),
build_output.clone());
let build_scripts = super::load_build_deps(cx, unit);
let kind = unit.kind;
try!(fs::create_dir_all(&cx.layout(unit.pkg, Kind::Host).build(unit.pkg)));
try!(fs::create_dir_all(&cx.layout(unit.pkg, unit.kind).build(unit.pkg)));
let exec_engine = cx.exec_engine.clone();
// Prepare the unit of "dirty work" which will actually run the custom build
// command.
//
// Note that this has to do some extra work just before running the command
// to determine extra environment variables and such.
let dirty = Work::new(move |desc_tx| {
// Make sure that OUT_DIR exists.
//
// If we have an old build directory, then just move it into place,
// otherwise create it!
if fs::metadata(&build_output).is_err() {
try!(fs::create_dir(&build_output).chain_error(|| {
internal("failed to create script output directory for \
build command")
}));
}
// For all our native lib dependencies, pick up their metadata to pass
// along to this custom build command. We're also careful to augment our
// dynamic library search path in case the build script depended on any
// native dynamic libraries.
{
let build_state = build_state.outputs.lock().unwrap();
for (name, id) in lib_deps {
let key = (id.clone(), kind);
let state = try!(build_state.get(&key).chain_error(|| {
internal(format!("failed to locate build state for env \
vars: {}/{:?}", id, kind))
}));
let data = &state.metadata;
for &(ref key, ref value) in data.iter() {
p.env(&format!("DEP_{}_{}", super::envify(&name),
super::envify(key)), value);
}
}
if let Some(build_scripts) = build_scripts {
try!(super::add_plugin_deps(&mut p, &build_state,
&build_scripts));
}
}
// And now finally, run the build command itself!
desc_tx.send(p.to_string()).ok();
let output = try!(exec_engine.exec_with_output(p).map_err(|mut e| {
e.desc = format!("failed to run custom build command for `{}`\n{}",
pkg_name, e.desc);
Human(e)
}));
try!(paths::write(&build_output.parent().unwrap().join("output"),
&output.stdout));
// After the build command has finished running, we need to be sure to
// remember all of its output so we can later discover precisely what it
// was, even if we don't run the build command again (due to freshness).
//
// This is also the location where we provide feedback into the build
// state informing what variables were discovered via our script as
// well.
let output = try!(str::from_utf8(&output.stdout).map_err(|_| {
human("build script output was not valid utf-8")
}));
let parsed_output = try!(BuildOutput::parse(output, &pkg_name));
build_state.insert(id, kind, parsed_output);
Ok(())
});
// Now that we've prepared our work-to-do, we need to prepare the fresh work
// itself to run when we actually end up just discarding what we calculated
// above.
let fresh = Work::new(move |_tx| {
let (id, pkg_name, build_state, build_output) = all;
let contents = try!(paths::read(&build_output.parent().unwrap()
.join("output")));
let output = try!(BuildOutput::parse(&contents, &pkg_name));
build_state.insert(id, kind, output);
Ok(())
});
Ok((dirty, fresh))
}
impl BuildState {
pub fn | (config: &super::BuildConfig,
packages: &PackageSet) -> BuildState {
let mut sources = HashMap::new();
for package in packages.iter() {
match package.manifest().links() {
Some(links) => {
sources.insert(links.to_string(),
package.package_id().clone());
}
None => {}
}
}
let mut outputs = HashMap::new();
let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
for ((name, output), kind) in i1.chain(i2) {
// If no package is using the library named `name`, then this is
// just an override that we ignore.
if let Some(id) = sources.get(name) {
outputs.insert((id.clone(), kind), output.clone());
}
}
BuildState { outputs: Mutex::new(outputs) }
}
fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
self.outputs.lock().unwrap().insert((id, kind), output);
}
}
impl BuildOutput {
// Parses the output of a script.
// The `pkg_name` is used for error messages.
pub fn parse(input: &str, pkg_name: &str) -> CargoResult<BuildOutput> {
let mut library_paths = Vec::new();
let mut library_links = Vec::new();
let mut cfgs = Vec::new();
let mut metadata = Vec::new();
let whence = format!("build script of `{}`", pkg_name);
for line in input.lines() {
let mut iter = line.splitn(2, ':');
if iter.next()!= Some("cargo") {
// skip this line since it doesn't start with "cargo:"
continue;
}
let data = match iter.next() {
Some(val) => val,
None => continue
};
// getting the `key=value` part of the line
let mut iter = data.splitn(2, '=');
let key = iter.next();
let value = iter.next();
let (key, value) = match (key, value) {
(Some(a), Some(b)) => (a, b.trim_right()),
// line started with `cargo:` but didn't match `key=value`
_ => bail!("Wrong output in {}: `{}`", whence, line),
};
match key {
"rustc-flags" => {
let (libs, links) = try!(
BuildOutput::parse_rustc_flags(value, &whence)
);
library_links.extend(links.into_iter());
library_paths.extend(libs.into_iter());
}
"rustc-link-lib" => library_links.push(value.to_string()),
"rustc-link-search" => library_paths.push(PathBuf::from(value)),
"rustc-cfg" => cfgs.push(value.to_string()),
_ => metadata.push((key.to_string(), value.to_string())),
}
}
Ok(BuildOutput {
library_paths: library_paths,
library_links: library_links,
cfgs: cfgs,
metadata: metadata,
})
}
pub fn parse_rustc_flags(value: &str, whence: &str)
-> CargoResult<(Vec<PathBuf>, Vec<String>)> {
let value = value.trim();
let mut flags_iter = value.split(|c: char| c.is_whitespace())
.filter(|w| w.chars().any(|c|!c.is_whitespace()));
let (mut library_links, mut library_paths) = (Vec::new(), Vec::new());
loop {
let flag = match flags_iter.next() {
Some(f) => f,
None => break
};
if flag!= "-l" && flag!= "-L" {
bail!("Only `-l` and `-L` flags are allowed in {}: `{}`",
whence, value)
}
let value = match flags_iter.next() {
Some(v) => v,
None => bail!("Flag in rustc-flags has no value in {}: `{}`",
whence, value)
};
match flag {
"-l" => library_links.push(value.to_string()),
"-L" => library_paths.push(PathBuf::from(value)),
// was already checked above
_ => bail!("only -l and -L flags are allowed")
};
}
Ok((library_paths, library_links))
}
}
/// Compute the `build_scripts` map in the `Context` which tracks what build
/// scripts each package depends on.
///
/// The global `build_scripts` map lists for all (package, kind) tuples what set
/// of packages' build script outputs must be considered. For example this lists
/// all dependencies' `-L` flags which need to be propagated transitively.
///
/// The given set of targets to this function is the initial set of
/// targets/profiles which are being built.
pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>,
units: &[Unit<'b>]) {
let mut ret = HashMap::new();
for unit in units {
build(&mut ret, cx, unit);
}
cx.build_scripts.extend(ret.into_iter().map(|(k, v)| {
(k, Arc::new(v))
}));
// Recursive function to build up the map we're constructing. This function
// memoizes all of its return values as it goes along.
fn build<'a, 'b, 'cfg>(out: &'a mut HashMap<Unit<'b>, BuildScripts>,
cx: &Context<'b, 'cfg>,
unit: &Unit<'b>)
-> &'a BuildScripts {
// Do a quick pre-flight check to see if we've already calculated the
// set of dependencies.
if out.contains_key(unit) {
return &out[unit]
}
let mut to_link = BTreeSet::new();
let mut plugins = BTreeSet::new();
if!unit.target.is_custom_build() && unit.pkg.has_custom_build() {
to_link.insert((unit.pkg.package_id().clone(), unit.kind));
}
for unit in cx.dep_targets(unit).iter() {
let dep_scripts = build(out, cx, unit);
if unit.target.for_host() {
plugins.extend(dep_scripts.to_link.iter()
.map(|p| &p.0).cloned());
} else if unit.target.linkable() {
to_link.extend(dep_scripts.to_link.iter().cloned());
}
}
let prev = out.entry(*unit).or_insert(BuildScripts::default());
prev.to_link.extend(to_link);
prev.plugins.extend(plugins);
return prev
}
}
| new | identifier_name |
custom_build.rs | use std::collections::{HashMap, BTreeSet};
use std::fs;
use std::io::prelude::*;
use std::path::PathBuf;
use std::str;
use std::sync::{Mutex, Arc};
use core::{PackageId, PackageSet};
use util::{CargoResult, human, Human};
use util::{internal, ChainError, profile, paths};
use util::Freshness;
use super::job::Work;
use super::{fingerprint, process, Kind, Context, Unit};
use super::CommandType;
/// Contains the parsed output of a custom build script.
#[derive(Clone, Debug, Hash)]
pub struct BuildOutput {
/// Paths to pass to rustc with the `-L` flag
pub library_paths: Vec<PathBuf>,
/// Names and link kinds of libraries, suitable for the `-l` flag
pub library_links: Vec<String>,
/// Various `--cfg` flags to pass to the compiler
pub cfgs: Vec<String>,
/// Metadata to pass to the immediate dependencies
pub metadata: Vec<(String, String)>,
}
pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
pub struct BuildState {
pub outputs: Mutex<BuildMap>,
}
#[derive(Default)]
pub struct BuildScripts {
pub to_link: BTreeSet<(PackageId, Kind)>,
pub plugins: BTreeSet<PackageId>,
}
/// Prepares a `Work` that executes the target as a custom build script.
///
/// The `req` given is the requirement which this run of the build script will
/// prepare work for. If the requirement is specified as both the target and the
/// host platforms it is assumed that the two are equal and the build script is
/// only run once (not twice).
pub fn prepare(cx: &mut Context, unit: &Unit)
-> CargoResult<(Work, Work, Freshness)> {
let _p = profile::start(format!("build script prepare: {}/{}",
unit.pkg, unit.target.name()));
let key = (unit.pkg.package_id().clone(), unit.kind);
let overridden = cx.build_state.outputs.lock().unwrap().contains_key(&key);
let (work_dirty, work_fresh) = if overridden {
(Work::new(|_| Ok(())), Work::new(|_| Ok(())))
} else {
try!(build_work(cx, unit))
};
// Now that we've prep'd our work, build the work needed to manage the
// fingerprint and then start returning that upwards.
let (freshness, dirty, fresh) =
try!(fingerprint::prepare_build_cmd(cx, unit));
Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
}
fn build_work(cx: &mut Context, unit: &Unit) -> CargoResult<(Work, Work)> {
let (script_output, build_output) = {
(cx.layout(unit.pkg, Kind::Host).build(unit.pkg),
cx.layout(unit.pkg, unit.kind).build_out(unit.pkg))
};
// Building the command to execute
let to_exec = script_output.join(unit.target.name());
// Start preparing the process to execute, starting out with some
// environment variables. Note that the profile-related environment
// variables are not set with this the build script's profile but rather the
// package's library profile.
let profile = cx.lib_profile(unit.pkg.package_id());
let to_exec = to_exec.into_os_string();
let mut p = try!(super::process(CommandType::Host(to_exec), unit.pkg, cx));
p.env("OUT_DIR", &build_output)
.env("CARGO_MANIFEST_DIR", unit.pkg.root())
.env("NUM_JOBS", &cx.jobs().to_string())
.env("TARGET", &match unit.kind {
Kind::Host => &cx.config.rustc_info().host[..],
Kind::Target => cx.target_triple(),
})
.env("DEBUG", &profile.debuginfo.to_string())
.env("OPT_LEVEL", &profile.opt_level.to_string())
.env("PROFILE", if cx.build_config.release {"release"} else {"debug"})
.env("HOST", &cx.config.rustc_info().host);
// Be sure to pass along all enabled features for this package, this is the
// last piece of statically known information that we have.
if let Some(features) = cx.resolve.features(unit.pkg.package_id()) {
for feat in features.iter() {
p.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
}
}
// Gather the set of native dependencies that this package has along with
// some other variables to close over.
//
// This information will be used at build-time later on to figure out which
// sorts of variables need to be discovered at that time.
let lib_deps = {
cx.dep_run_custom_build(unit).iter().filter_map(|unit| {
if unit.profile.run_custom_build {
Some((unit.pkg.manifest().links().unwrap().to_string(),
unit.pkg.package_id().clone()))
} else {
None
}
}).collect::<Vec<_>>()
};
let pkg_name = unit.pkg.to_string();
let build_state = cx.build_state.clone();
let id = unit.pkg.package_id().clone();
let all = (id.clone(), pkg_name.clone(), build_state.clone(),
build_output.clone());
let build_scripts = super::load_build_deps(cx, unit);
let kind = unit.kind;
try!(fs::create_dir_all(&cx.layout(unit.pkg, Kind::Host).build(unit.pkg)));
try!(fs::create_dir_all(&cx.layout(unit.pkg, unit.kind).build(unit.pkg)));
let exec_engine = cx.exec_engine.clone();
// Prepare the unit of "dirty work" which will actually run the custom build
// command.
//
// Note that this has to do some extra work just before running the command
// to determine extra environment variables and such.
let dirty = Work::new(move |desc_tx| {
// Make sure that OUT_DIR exists.
//
// If we have an old build directory, then just move it into place,
// otherwise create it!
if fs::metadata(&build_output).is_err() {
try!(fs::create_dir(&build_output).chain_error(|| {
internal("failed to create script output directory for \
build command")
}));
}
// For all our native lib dependencies, pick up their metadata to pass
// along to this custom build command. We're also careful to augment our
// dynamic library search path in case the build script depended on any
// native dynamic libraries.
{
let build_state = build_state.outputs.lock().unwrap();
for (name, id) in lib_deps {
let key = (id.clone(), kind);
let state = try!(build_state.get(&key).chain_error(|| {
internal(format!("failed to locate build state for env \
vars: {}/{:?}", id, kind))
}));
let data = &state.metadata;
for &(ref key, ref value) in data.iter() {
p.env(&format!("DEP_{}_{}", super::envify(&name),
super::envify(key)), value);
}
}
if let Some(build_scripts) = build_scripts {
try!(super::add_plugin_deps(&mut p, &build_state,
&build_scripts));
}
}
// And now finally, run the build command itself!
desc_tx.send(p.to_string()).ok();
let output = try!(exec_engine.exec_with_output(p).map_err(|mut e| {
e.desc = format!("failed to run custom build command for `{}`\n{}",
pkg_name, e.desc);
Human(e)
}));
try!(paths::write(&build_output.parent().unwrap().join("output"),
&output.stdout));
// After the build command has finished running, we need to be sure to
// remember all of its output so we can later discover precisely what it
// was, even if we don't run the build command again (due to freshness).
//
// This is also the location where we provide feedback into the build
// state informing what variables were discovered via our script as
// well.
let output = try!(str::from_utf8(&output.stdout).map_err(|_| {
human("build script output was not valid utf-8")
}));
let parsed_output = try!(BuildOutput::parse(output, &pkg_name));
build_state.insert(id, kind, parsed_output);
Ok(())
});
// Now that we've prepared our work-to-do, we need to prepare the fresh work
// itself to run when we actually end up just discarding what we calculated
// above.
let fresh = Work::new(move |_tx| {
let (id, pkg_name, build_state, build_output) = all;
let contents = try!(paths::read(&build_output.parent().unwrap()
.join("output")));
let output = try!(BuildOutput::parse(&contents, &pkg_name));
build_state.insert(id, kind, output);
Ok(())
});
Ok((dirty, fresh))
}
impl BuildState {
pub fn new(config: &super::BuildConfig,
packages: &PackageSet) -> BuildState {
let mut sources = HashMap::new();
for package in packages.iter() {
match package.manifest().links() {
Some(links) => {
sources.insert(links.to_string(),
package.package_id().clone());
}
None => {}
}
}
let mut outputs = HashMap::new();
let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
for ((name, output), kind) in i1.chain(i2) {
// If no package is using the library named `name`, then this is
// just an override that we ignore.
if let Some(id) = sources.get(name) {
outputs.insert((id.clone(), kind), output.clone());
}
}
BuildState { outputs: Mutex::new(outputs) }
}
fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
self.outputs.lock().unwrap().insert((id, kind), output);
}
}
impl BuildOutput {
// Parses the output of a script.
// The `pkg_name` is used for error messages.
pub fn parse(input: &str, pkg_name: &str) -> CargoResult<BuildOutput> {
let mut library_paths = Vec::new();
let mut library_links = Vec::new();
let mut cfgs = Vec::new();
let mut metadata = Vec::new();
let whence = format!("build script of `{}`", pkg_name);
for line in input.lines() {
let mut iter = line.splitn(2, ':');
if iter.next()!= Some("cargo") {
// skip this line since it doesn't start with "cargo:"
continue;
}
let data = match iter.next() {
Some(val) => val,
None => continue
};
// getting the `key=value` part of the line
let mut iter = data.splitn(2, '='); | // line started with `cargo:` but didn't match `key=value`
_ => bail!("Wrong output in {}: `{}`", whence, line),
};
match key {
"rustc-flags" => {
let (libs, links) = try!(
BuildOutput::parse_rustc_flags(value, &whence)
);
library_links.extend(links.into_iter());
library_paths.extend(libs.into_iter());
}
"rustc-link-lib" => library_links.push(value.to_string()),
"rustc-link-search" => library_paths.push(PathBuf::from(value)),
"rustc-cfg" => cfgs.push(value.to_string()),
_ => metadata.push((key.to_string(), value.to_string())),
}
}
Ok(BuildOutput {
library_paths: library_paths,
library_links: library_links,
cfgs: cfgs,
metadata: metadata,
})
}
pub fn parse_rustc_flags(value: &str, whence: &str)
-> CargoResult<(Vec<PathBuf>, Vec<String>)> {
let value = value.trim();
let mut flags_iter = value.split(|c: char| c.is_whitespace())
.filter(|w| w.chars().any(|c|!c.is_whitespace()));
let (mut library_links, mut library_paths) = (Vec::new(), Vec::new());
loop {
let flag = match flags_iter.next() {
Some(f) => f,
None => break
};
if flag!= "-l" && flag!= "-L" {
bail!("Only `-l` and `-L` flags are allowed in {}: `{}`",
whence, value)
}
let value = match flags_iter.next() {
Some(v) => v,
None => bail!("Flag in rustc-flags has no value in {}: `{}`",
whence, value)
};
match flag {
"-l" => library_links.push(value.to_string()),
"-L" => library_paths.push(PathBuf::from(value)),
// was already checked above
_ => bail!("only -l and -L flags are allowed")
};
}
Ok((library_paths, library_links))
}
}
/// Compute the `build_scripts` map in the `Context` which tracks what build
/// scripts each package depends on.
///
/// The global `build_scripts` map lists for all (package, kind) tuples what set
/// of packages' build script outputs must be considered. For example this lists
/// all dependencies' `-L` flags which need to be propagated transitively.
///
/// The given set of targets to this function is the initial set of
/// targets/profiles which are being built.
pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>,
units: &[Unit<'b>]) {
let mut ret = HashMap::new();
for unit in units {
build(&mut ret, cx, unit);
}
cx.build_scripts.extend(ret.into_iter().map(|(k, v)| {
(k, Arc::new(v))
}));
// Recursive function to build up the map we're constructing. This function
// memoizes all of its return values as it goes along.
fn build<'a, 'b, 'cfg>(out: &'a mut HashMap<Unit<'b>, BuildScripts>,
cx: &Context<'b, 'cfg>,
unit: &Unit<'b>)
-> &'a BuildScripts {
// Do a quick pre-flight check to see if we've already calculated the
// set of dependencies.
if out.contains_key(unit) {
return &out[unit]
}
let mut to_link = BTreeSet::new();
let mut plugins = BTreeSet::new();
if!unit.target.is_custom_build() && unit.pkg.has_custom_build() {
to_link.insert((unit.pkg.package_id().clone(), unit.kind));
}
for unit in cx.dep_targets(unit).iter() {
let dep_scripts = build(out, cx, unit);
if unit.target.for_host() {
plugins.extend(dep_scripts.to_link.iter()
.map(|p| &p.0).cloned());
} else if unit.target.linkable() {
to_link.extend(dep_scripts.to_link.iter().cloned());
}
}
let prev = out.entry(*unit).or_insert(BuildScripts::default());
prev.to_link.extend(to_link);
prev.plugins.extend(plugins);
return prev
}
} | let key = iter.next();
let value = iter.next();
let (key, value) = match (key, value) {
(Some(a), Some(b)) => (a, b.trim_right()), | random_line_split |
time.rs | //! Utilities for mapping between human-usable time units and BAPS3's
//! preferred time units.
/// Enum of available time units.
///
/// This does not contain every possible time unit anyone may want to use with
/// a BAPS3 client, but covers the main possibilities.
///
/// Each unit specified in terms of its equivalent in microseconds, which is
/// the unit used 'over the wire' when talking to BAPS3.
#[derive(Copy)]
pub enum | {
/// Hours (1 hour = 60 minutes)
Hours,
/// Minutes (1 minute = 60 seconds).
Minutes,
/// Seconds (1 second = 1,000 milliseconds).
Seconds,
/// Milliseconds (1 millisecond = 1,000 microseconds).
Milliseconds,
/// Microseconds (the BAPS3 base unit).
Microseconds
}
impl TimeUnit {
/// Returns the suffix of the given unit.
///
/// This is mainly for use in human-readable times.
pub fn suffix(&self) -> &'static str {
match *self {
TimeUnit::Hours => "h",
TimeUnit::Minutes => "m",
TimeUnit::Seconds => "s",
TimeUnit::Milliseconds => "ms",
TimeUnit::Microseconds => "us"
}
}
/// Returns the equivalent of `n` of the given unit in microseconds.
pub fn as_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n * 1000 * 1000 * 60 * 60,
TimeUnit::Minutes => n * 1000 * 1000 * 60,
TimeUnit::Seconds => n * 1000 * 1000,
TimeUnit::Milliseconds => n * 1000,
TimeUnit::Microseconds => n
}
}
/// Returns the equivalent of `n` microseconds in the given unit.
///
/// As the return value is an integer, there may be some rounding down.
///
/// # Examples
///
/// 1 million microseconds is equivalent to 1 second:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// assert_eq!(TimeUnit::Seconds.from_micros(1000000), 1)
/// ```
///
/// Translating one hour of time to microseconds and back is the identity:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// let hour_us = TimeUnit::Hours.as_micros(1);
/// assert_eq!(TimeUnit::Hours.from_micros(hour_us), 1)
/// ```
pub fn from_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n / 1000 / 1000 / 60 / 60,
TimeUnit::Minutes => n / 1000 / 1000 / 60,
TimeUnit::Seconds => n / 1000 / 1000,
TimeUnit::Milliseconds => n / 1000,
TimeUnit::Microseconds => n
}
}
/// Multiplexes a series of unit flags into a TimeUnit.
/// Larger units take precedence.
pub fn from_flags(h: bool, m: bool, s: bool, ms: bool) -> TimeUnit {
if h { TimeUnit::Hours }
else if m { TimeUnit::Minutes }
else if s { TimeUnit::Seconds }
else if ms { TimeUnit::Milliseconds }
else { TimeUnit::Microseconds }
}
} | TimeUnit | identifier_name |
time.rs | //! Utilities for mapping between human-usable time units and BAPS3's
//! preferred time units.
/// Enum of available time units.
///
/// This does not contain every possible time unit anyone may want to use with
/// a BAPS3 client, but covers the main possibilities.
///
/// Each unit specified in terms of its equivalent in microseconds, which is
/// the unit used 'over the wire' when talking to BAPS3.
#[derive(Copy)]
pub enum TimeUnit {
/// Hours (1 hour = 60 minutes)
Hours,
/// Minutes (1 minute = 60 seconds).
Minutes,
/// Seconds (1 second = 1,000 milliseconds).
Seconds,
/// Milliseconds (1 millisecond = 1,000 microseconds).
Milliseconds,
/// Microseconds (the BAPS3 base unit).
Microseconds
}
impl TimeUnit {
/// Returns the suffix of the given unit.
///
/// This is mainly for use in human-readable times.
pub fn suffix(&self) -> &'static str {
match *self {
TimeUnit::Hours => "h",
TimeUnit::Minutes => "m",
TimeUnit::Seconds => "s",
TimeUnit::Milliseconds => "ms",
TimeUnit::Microseconds => "us"
}
}
/// Returns the equivalent of `n` of the given unit in microseconds.
pub fn as_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n * 1000 * 1000 * 60 * 60,
TimeUnit::Minutes => n * 1000 * 1000 * 60,
TimeUnit::Seconds => n * 1000 * 1000,
TimeUnit::Milliseconds => n * 1000,
TimeUnit::Microseconds => n
}
}
/// Returns the equivalent of `n` microseconds in the given unit.
///
/// As the return value is an integer, there may be some rounding down.
///
/// # Examples
///
/// 1 million microseconds is equivalent to 1 second:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// assert_eq!(TimeUnit::Seconds.from_micros(1000000), 1)
/// ```
///
/// Translating one hour of time to microseconds and back is the identity:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// let hour_us = TimeUnit::Hours.as_micros(1);
/// assert_eq!(TimeUnit::Hours.from_micros(hour_us), 1)
/// ```
pub fn from_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n / 1000 / 1000 / 60 / 60,
TimeUnit::Minutes => n / 1000 / 1000 / 60,
TimeUnit::Seconds => n / 1000 / 1000,
TimeUnit::Milliseconds => n / 1000,
TimeUnit::Microseconds => n
}
} |
/// Multiplexes a series of unit flags into a TimeUnit.
/// Larger units take precedence.
pub fn from_flags(h: bool, m: bool, s: bool, ms: bool) -> TimeUnit {
if h { TimeUnit::Hours }
else if m { TimeUnit::Minutes }
else if s { TimeUnit::Seconds }
else if ms { TimeUnit::Milliseconds }
else { TimeUnit::Microseconds }
}
} | random_line_split |
|
time.rs | //! Utilities for mapping between human-usable time units and BAPS3's
//! preferred time units.
/// Enum of available time units.
///
/// This does not contain every possible time unit anyone may want to use with
/// a BAPS3 client, but covers the main possibilities.
///
/// Each unit specified in terms of its equivalent in microseconds, which is
/// the unit used 'over the wire' when talking to BAPS3.
#[derive(Copy)]
pub enum TimeUnit {
/// Hours (1 hour = 60 minutes)
Hours,
/// Minutes (1 minute = 60 seconds).
Minutes,
/// Seconds (1 second = 1,000 milliseconds).
Seconds,
/// Milliseconds (1 millisecond = 1,000 microseconds).
Milliseconds,
/// Microseconds (the BAPS3 base unit).
Microseconds
}
impl TimeUnit {
/// Returns the suffix of the given unit.
///
/// This is mainly for use in human-readable times.
pub fn suffix(&self) -> &'static str {
match *self {
TimeUnit::Hours => "h",
TimeUnit::Minutes => "m",
TimeUnit::Seconds => "s",
TimeUnit::Milliseconds => "ms",
TimeUnit::Microseconds => "us"
}
}
/// Returns the equivalent of `n` of the given unit in microseconds.
pub fn as_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n * 1000 * 1000 * 60 * 60,
TimeUnit::Minutes => n * 1000 * 1000 * 60,
TimeUnit::Seconds => n * 1000 * 1000,
TimeUnit::Milliseconds => n * 1000,
TimeUnit::Microseconds => n
}
}
/// Returns the equivalent of `n` microseconds in the given unit.
///
/// As the return value is an integer, there may be some rounding down.
///
/// # Examples
///
/// 1 million microseconds is equivalent to 1 second:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// assert_eq!(TimeUnit::Seconds.from_micros(1000000), 1)
/// ```
///
/// Translating one hour of time to microseconds and back is the identity:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// let hour_us = TimeUnit::Hours.as_micros(1);
/// assert_eq!(TimeUnit::Hours.from_micros(hour_us), 1)
/// ```
pub fn from_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n / 1000 / 1000 / 60 / 60,
TimeUnit::Minutes => n / 1000 / 1000 / 60,
TimeUnit::Seconds => n / 1000 / 1000,
TimeUnit::Milliseconds => n / 1000,
TimeUnit::Microseconds => n
}
}
/// Multiplexes a series of unit flags into a TimeUnit.
/// Larger units take precedence.
pub fn from_flags(h: bool, m: bool, s: bool, ms: bool) -> TimeUnit |
} | {
if h { TimeUnit::Hours }
else if m { TimeUnit::Minutes }
else if s { TimeUnit::Seconds }
else if ms { TimeUnit::Milliseconds }
else { TimeUnit::Microseconds }
} | identifier_body |
time.rs | //! Utilities for mapping between human-usable time units and BAPS3's
//! preferred time units.
/// Enum of available time units.
///
/// This does not contain every possible time unit anyone may want to use with
/// a BAPS3 client, but covers the main possibilities.
///
/// Each unit specified in terms of its equivalent in microseconds, which is
/// the unit used 'over the wire' when talking to BAPS3.
#[derive(Copy)]
pub enum TimeUnit {
/// Hours (1 hour = 60 minutes)
Hours,
/// Minutes (1 minute = 60 seconds).
Minutes,
/// Seconds (1 second = 1,000 milliseconds).
Seconds,
/// Milliseconds (1 millisecond = 1,000 microseconds).
Milliseconds,
/// Microseconds (the BAPS3 base unit).
Microseconds
}
impl TimeUnit {
/// Returns the suffix of the given unit.
///
/// This is mainly for use in human-readable times.
pub fn suffix(&self) -> &'static str {
match *self {
TimeUnit::Hours => "h",
TimeUnit::Minutes => "m",
TimeUnit::Seconds => "s",
TimeUnit::Milliseconds => "ms",
TimeUnit::Microseconds => "us"
}
}
/// Returns the equivalent of `n` of the given unit in microseconds.
pub fn as_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n * 1000 * 1000 * 60 * 60,
TimeUnit::Minutes => n * 1000 * 1000 * 60,
TimeUnit::Seconds => n * 1000 * 1000,
TimeUnit::Milliseconds => n * 1000,
TimeUnit::Microseconds => n
}
}
/// Returns the equivalent of `n` microseconds in the given unit.
///
/// As the return value is an integer, there may be some rounding down.
///
/// # Examples
///
/// 1 million microseconds is equivalent to 1 second:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// assert_eq!(TimeUnit::Seconds.from_micros(1000000), 1)
/// ```
///
/// Translating one hour of time to microseconds and back is the identity:
///
/// ```rust
/// use baps3_cli::time::TimeUnit;
/// let hour_us = TimeUnit::Hours.as_micros(1);
/// assert_eq!(TimeUnit::Hours.from_micros(hour_us), 1)
/// ```
pub fn from_micros(&self, n: u64) -> u64 {
match *self {
TimeUnit::Hours => n / 1000 / 1000 / 60 / 60,
TimeUnit::Minutes => n / 1000 / 1000 / 60,
TimeUnit::Seconds => n / 1000 / 1000,
TimeUnit::Milliseconds => n / 1000,
TimeUnit::Microseconds => n
}
}
/// Multiplexes a series of unit flags into a TimeUnit.
/// Larger units take precedence.
pub fn from_flags(h: bool, m: bool, s: bool, ms: bool) -> TimeUnit {
if h { TimeUnit::Hours }
else if m { TimeUnit::Minutes }
else if s { TimeUnit::Seconds }
else if ms |
else { TimeUnit::Microseconds }
}
} | { TimeUnit::Milliseconds } | conditional_block |
mem.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::encryptionpb::EncryptedContent;
use super::metadata::*;
use crate::crypter::*;
use crate::{AesGcmCrypter, Error, Iv, Result};
/// An in-memory backend, it saves master key in memory.
pub(crate) struct MemAesGcmBackend {
pub key: Vec<u8>,
}
impl MemAesGcmBackend {
pub fn new(key: Vec<u8>) -> Result<MemAesGcmBackend> {
if key.len()!= AesGcmCrypter::KEY_LEN {
return Err(box_err!(
"encryption method and key length mismatch, expect {} get {}",
AesGcmCrypter::KEY_LEN,
key.len()
));
}
Ok(MemAesGcmBackend { key })
}
pub fn encrypt_content(&self, plaintext: &[u8], iv: Iv) -> Result<EncryptedContent> {
let mut content = EncryptedContent::default();
content.mut_metadata().insert(
MetadataKey::Method.as_str().to_owned(),
MetadataMethod::Aes256Gcm.as_slice().to_vec(),
);
let iv_value = iv.as_slice().to_vec();
content
.mut_metadata()
.insert(MetadataKey::Iv.as_str().to_owned(), iv_value);
let (ciphertext, gcm_tag) = AesGcmCrypter::new(&self.key, iv).encrypt(plaintext)?;
content.set_content(ciphertext);
content.mut_metadata().insert(
MetadataKey::AesGcmTag.as_str().to_owned(),
gcm_tag.as_slice().to_owned(),
);
Ok(content)
}
// On decrypt failure, the rule is to return WrongMasterKey error in case it is possible that
// a wrong master key has been used, or other error otherwise.
pub fn decrypt_content(&self, content: &EncryptedContent) -> Result<Vec<u8>> {
let method = content
.get_metadata()
.get(MetadataKey::Method.as_str())
.ok_or_else(|| {
// Missing method in metadata. The metadata of the encrypted content is invalid or
// corrupted.
Error::Other(box_err!(
"metadata {} not found",
MetadataKey::Method.as_str()
))
})?;
if method.as_slice()!= MetadataMethod::Aes256Gcm.as_slice() |
let key = &self.key;
let iv_value = content
.get_metadata()
.get(MetadataKey::Iv.as_str())
.ok_or_else(|| {
// IV is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("metadata {} not found", MetadataKey::Iv.as_str()))
})?;
let iv = Iv::from_slice(iv_value.as_slice())?;
let tag = content
.get_metadata()
.get(MetadataKey::AesGcmTag.as_str())
.ok_or_else(|| {
// Tag is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("gcm tag not found"))
})?;
let gcm_tag = AesGcmTag::from(tag.as_slice());
let ciphertext = content.get_content();
let plaintext = AesGcmCrypter::new(key, iv)
.decrypt(ciphertext, gcm_tag)
.map_err(|e|
// Decryption error, likely caused by mismatched tag. It could be the tag is
// corrupted, or the encrypted content is fake by an attacker, but more likely
// it is caused by a wrong master key being used.
Error::WrongMasterKey(box_err!("decrypt in GCM mode failed: {}", e)))?;
Ok(plaintext)
}
}
#[cfg(test)]
mod tests {
use hex::FromHex;
use matches::assert_matches;
use super::*;
#[test]
fn test_mem_backend_ase_256_gcm() {
// See more http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
let pt = Vec::from_hex("25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749")
.unwrap();
let ct = Vec::from_hex("84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980")
.unwrap();
let key = Vec::from_hex("c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139")
.unwrap();
let iv = Vec::from_hex("cafabd9672ca6c79a2fbdc22").unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let iv = Iv::from_slice(iv.as_slice()).unwrap();
let encrypted_content = backend.encrypt_content(&pt, iv).unwrap();
assert_eq!(encrypted_content.get_content(), ct.as_slice());
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
}
#[test]
fn test_mem_backend_authenticate() {
let pt = vec![1u8, 2, 3];
let key = Vec::from_hex("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4")
.unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let encrypted_content = backend.encrypt_content(&pt, Iv::new_gcm()).unwrap();
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
// Must fail is method not found.
let mut encrypted_content_missing_method = encrypted_content.clone();
encrypted_content_missing_method
.mut_metadata()
.remove(MetadataKey::Method.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if method is not aes256-gcm.
let mut encrypted_content_invalid_method = encrypted_content.clone();
let mut invalid_suffix = b"_invalid".to_vec();
encrypted_content_invalid_method
.mut_metadata()
.get_mut(MetadataKey::Method.as_str())
.unwrap()
.append(&mut invalid_suffix);
assert_matches!(
backend
.decrypt_content(&encrypted_content_invalid_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if tag not found.
let mut encrypted_content_missing_tag = encrypted_content.clone();
encrypted_content_missing_tag
.mut_metadata()
.remove(MetadataKey::AesGcmTag.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_tag)
.unwrap_err(),
Error::Other(_)
);
// Must fail with WrongMasterKey error due to mismatched tag.
let mut encrypted_content_mismatch_tag = encrypted_content;
encrypted_content_mismatch_tag
.mut_metadata()
.get_mut(MetadataKey::AesGcmTag.as_str())
.unwrap()[0] ^= 0b11111111u8;
assert_matches!(
backend
.decrypt_content(&encrypted_content_mismatch_tag)
.unwrap_err(),
Error::WrongMasterKey(_)
);
}
}
| {
// Currently we only support aes256-gcm. A different method could mean the encrypted
// content is written by a future version of TiKV, and we don't know how to handle it.
// Fail immediately instead of fallback to previous key.
return Err(Error::Other(box_err!(
"encryption method mismatch, expected {:?} vs actual {:?}",
MetadataMethod::Aes256Gcm.as_slice(),
method
)));
} | conditional_block |
mem.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::encryptionpb::EncryptedContent;
use super::metadata::*;
use crate::crypter::*;
use crate::{AesGcmCrypter, Error, Iv, Result};
/// An in-memory backend, it saves master key in memory.
pub(crate) struct MemAesGcmBackend {
pub key: Vec<u8>,
}
impl MemAesGcmBackend {
pub fn new(key: Vec<u8>) -> Result<MemAesGcmBackend> {
if key.len()!= AesGcmCrypter::KEY_LEN {
return Err(box_err!(
"encryption method and key length mismatch, expect {} get {}",
AesGcmCrypter::KEY_LEN,
key.len()
));
}
Ok(MemAesGcmBackend { key })
}
pub fn encrypt_content(&self, plaintext: &[u8], iv: Iv) -> Result<EncryptedContent> {
let mut content = EncryptedContent::default();
content.mut_metadata().insert(
MetadataKey::Method.as_str().to_owned(),
MetadataMethod::Aes256Gcm.as_slice().to_vec(),
);
let iv_value = iv.as_slice().to_vec();
content
.mut_metadata()
.insert(MetadataKey::Iv.as_str().to_owned(), iv_value);
let (ciphertext, gcm_tag) = AesGcmCrypter::new(&self.key, iv).encrypt(plaintext)?;
content.set_content(ciphertext);
content.mut_metadata().insert(
MetadataKey::AesGcmTag.as_str().to_owned(),
gcm_tag.as_slice().to_owned(),
);
Ok(content)
}
// On decrypt failure, the rule is to return WrongMasterKey error in case it is possible that
// a wrong master key has been used, or other error otherwise.
pub fn decrypt_content(&self, content: &EncryptedContent) -> Result<Vec<u8>> {
let method = content
.get_metadata()
.get(MetadataKey::Method.as_str())
.ok_or_else(|| {
// Missing method in metadata. The metadata of the encrypted content is invalid or
// corrupted.
Error::Other(box_err!(
"metadata {} not found",
MetadataKey::Method.as_str()
))
})?;
if method.as_slice()!= MetadataMethod::Aes256Gcm.as_slice() {
// Currently we only support aes256-gcm. A different method could mean the encrypted
// content is written by a future version of TiKV, and we don't know how to handle it.
// Fail immediately instead of fallback to previous key.
return Err(Error::Other(box_err!(
"encryption method mismatch, expected {:?} vs actual {:?}",
MetadataMethod::Aes256Gcm.as_slice(),
method
)));
}
let key = &self.key;
let iv_value = content
.get_metadata()
.get(MetadataKey::Iv.as_str())
.ok_or_else(|| {
// IV is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("metadata {} not found", MetadataKey::Iv.as_str()))
})?;
let iv = Iv::from_slice(iv_value.as_slice())?;
let tag = content
.get_metadata()
.get(MetadataKey::AesGcmTag.as_str())
.ok_or_else(|| {
// Tag is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("gcm tag not found"))
})?;
let gcm_tag = AesGcmTag::from(tag.as_slice());
let ciphertext = content.get_content();
let plaintext = AesGcmCrypter::new(key, iv)
.decrypt(ciphertext, gcm_tag)
.map_err(|e|
// Decryption error, likely caused by mismatched tag. It could be the tag is
// corrupted, or the encrypted content is fake by an attacker, but more likely
// it is caused by a wrong master key being used.
Error::WrongMasterKey(box_err!("decrypt in GCM mode failed: {}", e)))?;
Ok(plaintext)
}
}
#[cfg(test)]
mod tests {
use hex::FromHex;
use matches::assert_matches;
use super::*;
#[test]
fn test_mem_backend_ase_256_gcm() |
#[test]
fn test_mem_backend_authenticate() {
let pt = vec![1u8, 2, 3];
let key = Vec::from_hex("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4")
.unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let encrypted_content = backend.encrypt_content(&pt, Iv::new_gcm()).unwrap();
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
// Must fail is method not found.
let mut encrypted_content_missing_method = encrypted_content.clone();
encrypted_content_missing_method
.mut_metadata()
.remove(MetadataKey::Method.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if method is not aes256-gcm.
let mut encrypted_content_invalid_method = encrypted_content.clone();
let mut invalid_suffix = b"_invalid".to_vec();
encrypted_content_invalid_method
.mut_metadata()
.get_mut(MetadataKey::Method.as_str())
.unwrap()
.append(&mut invalid_suffix);
assert_matches!(
backend
.decrypt_content(&encrypted_content_invalid_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if tag not found.
let mut encrypted_content_missing_tag = encrypted_content.clone();
encrypted_content_missing_tag
.mut_metadata()
.remove(MetadataKey::AesGcmTag.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_tag)
.unwrap_err(),
Error::Other(_)
);
// Must fail with WrongMasterKey error due to mismatched tag.
let mut encrypted_content_mismatch_tag = encrypted_content;
encrypted_content_mismatch_tag
.mut_metadata()
.get_mut(MetadataKey::AesGcmTag.as_str())
.unwrap()[0] ^= 0b11111111u8;
assert_matches!(
backend
.decrypt_content(&encrypted_content_mismatch_tag)
.unwrap_err(),
Error::WrongMasterKey(_)
);
}
}
| {
// See more http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
let pt = Vec::from_hex("25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749")
.unwrap();
let ct = Vec::from_hex("84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980")
.unwrap();
let key = Vec::from_hex("c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139")
.unwrap();
let iv = Vec::from_hex("cafabd9672ca6c79a2fbdc22").unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let iv = Iv::from_slice(iv.as_slice()).unwrap();
let encrypted_content = backend.encrypt_content(&pt, iv).unwrap();
assert_eq!(encrypted_content.get_content(), ct.as_slice());
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
} | identifier_body |
mem.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::encryptionpb::EncryptedContent;
use super::metadata::*;
use crate::crypter::*;
use crate::{AesGcmCrypter, Error, Iv, Result};
/// An in-memory backend, it saves master key in memory.
pub(crate) struct MemAesGcmBackend {
pub key: Vec<u8>,
}
impl MemAesGcmBackend {
pub fn new(key: Vec<u8>) -> Result<MemAesGcmBackend> {
if key.len()!= AesGcmCrypter::KEY_LEN {
return Err(box_err!(
"encryption method and key length mismatch, expect {} get {}",
AesGcmCrypter::KEY_LEN,
key.len()
));
}
Ok(MemAesGcmBackend { key })
}
pub fn encrypt_content(&self, plaintext: &[u8], iv: Iv) -> Result<EncryptedContent> {
let mut content = EncryptedContent::default();
content.mut_metadata().insert(
MetadataKey::Method.as_str().to_owned(),
MetadataMethod::Aes256Gcm.as_slice().to_vec(),
);
let iv_value = iv.as_slice().to_vec();
content
.mut_metadata()
.insert(MetadataKey::Iv.as_str().to_owned(), iv_value);
let (ciphertext, gcm_tag) = AesGcmCrypter::new(&self.key, iv).encrypt(plaintext)?;
content.set_content(ciphertext);
content.mut_metadata().insert(
MetadataKey::AesGcmTag.as_str().to_owned(),
gcm_tag.as_slice().to_owned(),
);
Ok(content)
}
// On decrypt failure, the rule is to return WrongMasterKey error in case it is possible that
// a wrong master key has been used, or other error otherwise.
pub fn decrypt_content(&self, content: &EncryptedContent) -> Result<Vec<u8>> {
let method = content
.get_metadata()
.get(MetadataKey::Method.as_str())
.ok_or_else(|| {
// Missing method in metadata. The metadata of the encrypted content is invalid or
// corrupted.
Error::Other(box_err!(
"metadata {} not found",
MetadataKey::Method.as_str()
))
})?;
if method.as_slice()!= MetadataMethod::Aes256Gcm.as_slice() {
// Currently we only support aes256-gcm. A different method could mean the encrypted
// content is written by a future version of TiKV, and we don't know how to handle it.
// Fail immediately instead of fallback to previous key.
return Err(Error::Other(box_err!(
"encryption method mismatch, expected {:?} vs actual {:?}",
MetadataMethod::Aes256Gcm.as_slice(),
method
)));
}
let key = &self.key;
let iv_value = content
.get_metadata()
.get(MetadataKey::Iv.as_str())
.ok_or_else(|| {
// IV is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("metadata {} not found", MetadataKey::Iv.as_str()))
})?;
let iv = Iv::from_slice(iv_value.as_slice())?;
let tag = content
.get_metadata()
.get(MetadataKey::AesGcmTag.as_str())
.ok_or_else(|| { | let gcm_tag = AesGcmTag::from(tag.as_slice());
let ciphertext = content.get_content();
let plaintext = AesGcmCrypter::new(key, iv)
.decrypt(ciphertext, gcm_tag)
.map_err(|e|
// Decryption error, likely caused by mismatched tag. It could be the tag is
// corrupted, or the encrypted content is fake by an attacker, but more likely
// it is caused by a wrong master key being used.
Error::WrongMasterKey(box_err!("decrypt in GCM mode failed: {}", e)))?;
Ok(plaintext)
}
}
#[cfg(test)]
mod tests {
use hex::FromHex;
use matches::assert_matches;
use super::*;
#[test]
fn test_mem_backend_ase_256_gcm() {
// See more http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
let pt = Vec::from_hex("25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749")
.unwrap();
let ct = Vec::from_hex("84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980")
.unwrap();
let key = Vec::from_hex("c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139")
.unwrap();
let iv = Vec::from_hex("cafabd9672ca6c79a2fbdc22").unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let iv = Iv::from_slice(iv.as_slice()).unwrap();
let encrypted_content = backend.encrypt_content(&pt, iv).unwrap();
assert_eq!(encrypted_content.get_content(), ct.as_slice());
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
}
#[test]
fn test_mem_backend_authenticate() {
let pt = vec![1u8, 2, 3];
let key = Vec::from_hex("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4")
.unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let encrypted_content = backend.encrypt_content(&pt, Iv::new_gcm()).unwrap();
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
// Must fail is method not found.
let mut encrypted_content_missing_method = encrypted_content.clone();
encrypted_content_missing_method
.mut_metadata()
.remove(MetadataKey::Method.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if method is not aes256-gcm.
let mut encrypted_content_invalid_method = encrypted_content.clone();
let mut invalid_suffix = b"_invalid".to_vec();
encrypted_content_invalid_method
.mut_metadata()
.get_mut(MetadataKey::Method.as_str())
.unwrap()
.append(&mut invalid_suffix);
assert_matches!(
backend
.decrypt_content(&encrypted_content_invalid_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if tag not found.
let mut encrypted_content_missing_tag = encrypted_content.clone();
encrypted_content_missing_tag
.mut_metadata()
.remove(MetadataKey::AesGcmTag.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_tag)
.unwrap_err(),
Error::Other(_)
);
// Must fail with WrongMasterKey error due to mismatched tag.
let mut encrypted_content_mismatch_tag = encrypted_content;
encrypted_content_mismatch_tag
.mut_metadata()
.get_mut(MetadataKey::AesGcmTag.as_str())
.unwrap()[0] ^= 0b11111111u8;
assert_matches!(
backend
.decrypt_content(&encrypted_content_mismatch_tag)
.unwrap_err(),
Error::WrongMasterKey(_)
);
}
} | // Tag is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("gcm tag not found"))
})?; | random_line_split |
mem.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::encryptionpb::EncryptedContent;
use super::metadata::*;
use crate::crypter::*;
use crate::{AesGcmCrypter, Error, Iv, Result};
/// An in-memory backend, it saves master key in memory.
pub(crate) struct MemAesGcmBackend {
pub key: Vec<u8>,
}
impl MemAesGcmBackend {
pub fn new(key: Vec<u8>) -> Result<MemAesGcmBackend> {
if key.len()!= AesGcmCrypter::KEY_LEN {
return Err(box_err!(
"encryption method and key length mismatch, expect {} get {}",
AesGcmCrypter::KEY_LEN,
key.len()
));
}
Ok(MemAesGcmBackend { key })
}
pub fn | (&self, plaintext: &[u8], iv: Iv) -> Result<EncryptedContent> {
let mut content = EncryptedContent::default();
content.mut_metadata().insert(
MetadataKey::Method.as_str().to_owned(),
MetadataMethod::Aes256Gcm.as_slice().to_vec(),
);
let iv_value = iv.as_slice().to_vec();
content
.mut_metadata()
.insert(MetadataKey::Iv.as_str().to_owned(), iv_value);
let (ciphertext, gcm_tag) = AesGcmCrypter::new(&self.key, iv).encrypt(plaintext)?;
content.set_content(ciphertext);
content.mut_metadata().insert(
MetadataKey::AesGcmTag.as_str().to_owned(),
gcm_tag.as_slice().to_owned(),
);
Ok(content)
}
// On decrypt failure, the rule is to return WrongMasterKey error in case it is possible that
// a wrong master key has been used, or other error otherwise.
pub fn decrypt_content(&self, content: &EncryptedContent) -> Result<Vec<u8>> {
let method = content
.get_metadata()
.get(MetadataKey::Method.as_str())
.ok_or_else(|| {
// Missing method in metadata. The metadata of the encrypted content is invalid or
// corrupted.
Error::Other(box_err!(
"metadata {} not found",
MetadataKey::Method.as_str()
))
})?;
if method.as_slice()!= MetadataMethod::Aes256Gcm.as_slice() {
// Currently we only support aes256-gcm. A different method could mean the encrypted
// content is written by a future version of TiKV, and we don't know how to handle it.
// Fail immediately instead of fallback to previous key.
return Err(Error::Other(box_err!(
"encryption method mismatch, expected {:?} vs actual {:?}",
MetadataMethod::Aes256Gcm.as_slice(),
method
)));
}
let key = &self.key;
let iv_value = content
.get_metadata()
.get(MetadataKey::Iv.as_str())
.ok_or_else(|| {
// IV is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("metadata {} not found", MetadataKey::Iv.as_str()))
})?;
let iv = Iv::from_slice(iv_value.as_slice())?;
let tag = content
.get_metadata()
.get(MetadataKey::AesGcmTag.as_str())
.ok_or_else(|| {
// Tag is missing. The metadata of the encrypted content is invalid or corrupted.
Error::Other(box_err!("gcm tag not found"))
})?;
let gcm_tag = AesGcmTag::from(tag.as_slice());
let ciphertext = content.get_content();
let plaintext = AesGcmCrypter::new(key, iv)
.decrypt(ciphertext, gcm_tag)
.map_err(|e|
// Decryption error, likely caused by mismatched tag. It could be the tag is
// corrupted, or the encrypted content is fake by an attacker, but more likely
// it is caused by a wrong master key being used.
Error::WrongMasterKey(box_err!("decrypt in GCM mode failed: {}", e)))?;
Ok(plaintext)
}
}
#[cfg(test)]
mod tests {
use hex::FromHex;
use matches::assert_matches;
use super::*;
#[test]
fn test_mem_backend_ase_256_gcm() {
// See more http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
let pt = Vec::from_hex("25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749")
.unwrap();
let ct = Vec::from_hex("84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980")
.unwrap();
let key = Vec::from_hex("c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139")
.unwrap();
let iv = Vec::from_hex("cafabd9672ca6c79a2fbdc22").unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let iv = Iv::from_slice(iv.as_slice()).unwrap();
let encrypted_content = backend.encrypt_content(&pt, iv).unwrap();
assert_eq!(encrypted_content.get_content(), ct.as_slice());
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
}
#[test]
fn test_mem_backend_authenticate() {
let pt = vec![1u8, 2, 3];
let key = Vec::from_hex("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4")
.unwrap();
let backend = MemAesGcmBackend::new(key).unwrap();
let encrypted_content = backend.encrypt_content(&pt, Iv::new_gcm()).unwrap();
let plaintext = backend.decrypt_content(&encrypted_content).unwrap();
assert_eq!(plaintext, pt);
// Must fail is method not found.
let mut encrypted_content_missing_method = encrypted_content.clone();
encrypted_content_missing_method
.mut_metadata()
.remove(MetadataKey::Method.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if method is not aes256-gcm.
let mut encrypted_content_invalid_method = encrypted_content.clone();
let mut invalid_suffix = b"_invalid".to_vec();
encrypted_content_invalid_method
.mut_metadata()
.get_mut(MetadataKey::Method.as_str())
.unwrap()
.append(&mut invalid_suffix);
assert_matches!(
backend
.decrypt_content(&encrypted_content_invalid_method)
.unwrap_err(),
Error::Other(_)
);
// Must fail if tag not found.
let mut encrypted_content_missing_tag = encrypted_content.clone();
encrypted_content_missing_tag
.mut_metadata()
.remove(MetadataKey::AesGcmTag.as_str());
assert_matches!(
backend
.decrypt_content(&encrypted_content_missing_tag)
.unwrap_err(),
Error::Other(_)
);
// Must fail with WrongMasterKey error due to mismatched tag.
let mut encrypted_content_mismatch_tag = encrypted_content;
encrypted_content_mismatch_tag
.mut_metadata()
.get_mut(MetadataKey::AesGcmTag.as_str())
.unwrap()[0] ^= 0b11111111u8;
assert_matches!(
backend
.decrypt_content(&encrypted_content_mismatch_tag)
.unwrap_err(),
Error::WrongMasterKey(_)
);
}
}
| encrypt_content | identifier_name |
003.rs | #![feature(slicing_syntax)]
extern crate test;
extern crate time;
| use std::os;
fn solution() -> u64 {
let mut n = 600_851_475_143;
for factor in iter::count(3, 2) {
while n % factor == 0 {
n /= factor;
}
if factor * factor > n {
return n;
} else if n == 1 {
return factor;
}
}
unreachable!();
}
fn main() {
match os::args()[] {
[_, ref flag] if flag[] == "-a" => return println!("{}", solution()),
_ => {},
}
for line in stdio::stdin().lock().lines() {
let iters: u64 = line.unwrap()[].trim().parse().unwrap();
let start = time::precise_time_ns();
for _ in range(0, iters) {
test::black_box(solution());
}
let end = time::precise_time_ns();
println!("{}", end - start);
}
} | use std::io::stdio;
use std::iter; | random_line_split |
003.rs | #![feature(slicing_syntax)]
extern crate test;
extern crate time;
use std::io::stdio;
use std::iter;
use std::os;
fn solution() -> u64 {
let mut n = 600_851_475_143;
for factor in iter::count(3, 2) {
while n % factor == 0 {
n /= factor;
}
if factor * factor > n {
return n;
} else if n == 1 {
return factor;
}
}
unreachable!();
}
fn main() | {
match os::args()[] {
[_, ref flag] if flag[] == "-a" => return println!("{}", solution()),
_ => {},
}
for line in stdio::stdin().lock().lines() {
let iters: u64 = line.unwrap()[].trim().parse().unwrap();
let start = time::precise_time_ns();
for _ in range(0, iters) {
test::black_box(solution());
}
let end = time::precise_time_ns();
println!("{}", end - start);
}
} | identifier_body |
|
003.rs | #![feature(slicing_syntax)]
extern crate test;
extern crate time;
use std::io::stdio;
use std::iter;
use std::os;
fn solution() -> u64 {
let mut n = 600_851_475_143;
for factor in iter::count(3, 2) {
while n % factor == 0 {
n /= factor;
}
if factor * factor > n {
return n;
} else if n == 1 {
return factor;
}
}
unreachable!();
}
fn main() {
match os::args()[] {
[_, ref flag] if flag[] == "-a" => return println!("{}", solution()),
_ => | ,
}
for line in stdio::stdin().lock().lines() {
let iters: u64 = line.unwrap()[].trim().parse().unwrap();
let start = time::precise_time_ns();
for _ in range(0, iters) {
test::black_box(solution());
}
let end = time::precise_time_ns();
println!("{}", end - start);
}
}
| {} | conditional_block |
003.rs | #![feature(slicing_syntax)]
extern crate test;
extern crate time;
use std::io::stdio;
use std::iter;
use std::os;
fn solution() -> u64 {
let mut n = 600_851_475_143;
for factor in iter::count(3, 2) {
while n % factor == 0 {
n /= factor;
}
if factor * factor > n {
return n;
} else if n == 1 {
return factor;
}
}
unreachable!();
}
fn | () {
match os::args()[] {
[_, ref flag] if flag[] == "-a" => return println!("{}", solution()),
_ => {},
}
for line in stdio::stdin().lock().lines() {
let iters: u64 = line.unwrap()[].trim().parse().unwrap();
let start = time::precise_time_ns();
for _ in range(0, iters) {
test::black_box(solution());
}
let end = time::precise_time_ns();
println!("{}", end - start);
}
}
| main | identifier_name |
bpf_base.rs | use crate::abi::Endian;
use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, TargetOptions};
pub fn opts(endian: Endian) -> TargetOptions | ..Default::default()
}
}
| {
TargetOptions {
allow_asm: true,
endian,
linker_flavor: LinkerFlavor::BpfLinker,
atomic_cas: false,
executables: true,
dynamic_linking: true,
no_builtins: true,
panic_strategy: PanicStrategy::Abort,
position_independent_executables: true,
// Disable MergeFunctions since:
// - older kernels don't support bpf-to-bpf calls
// - on newer kernels, userspace still needs to relocate before calling
// BPF_PROG_LOAD and not all BPF libraries do that yet
merge_functions: MergeFunctions::Disabled,
obj_is_bitcode: true,
requires_lto: false,
singlethread: true,
max_atomic_width: Some(64), | identifier_body |
bpf_base.rs | use crate::abi::Endian;
use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, TargetOptions};
pub fn opts(endian: Endian) -> TargetOptions {
TargetOptions {
allow_asm: true,
endian,
linker_flavor: LinkerFlavor::BpfLinker,
atomic_cas: false,
executables: true, | no_builtins: true,
panic_strategy: PanicStrategy::Abort,
position_independent_executables: true,
// Disable MergeFunctions since:
// - older kernels don't support bpf-to-bpf calls
// - on newer kernels, userspace still needs to relocate before calling
// BPF_PROG_LOAD and not all BPF libraries do that yet
merge_functions: MergeFunctions::Disabled,
obj_is_bitcode: true,
requires_lto: false,
singlethread: true,
max_atomic_width: Some(64),
..Default::default()
}
} | dynamic_linking: true, | random_line_split |
bpf_base.rs | use crate::abi::Endian;
use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, TargetOptions};
pub fn | (endian: Endian) -> TargetOptions {
TargetOptions {
allow_asm: true,
endian,
linker_flavor: LinkerFlavor::BpfLinker,
atomic_cas: false,
executables: true,
dynamic_linking: true,
no_builtins: true,
panic_strategy: PanicStrategy::Abort,
position_independent_executables: true,
// Disable MergeFunctions since:
// - older kernels don't support bpf-to-bpf calls
// - on newer kernels, userspace still needs to relocate before calling
// BPF_PROG_LOAD and not all BPF libraries do that yet
merge_functions: MergeFunctions::Disabled,
obj_is_bitcode: true,
requires_lto: false,
singlethread: true,
max_atomic_width: Some(64),
..Default::default()
}
}
| opts | identifier_name |
lib.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
#![forbid(unsafe_code)]
#![recursion_limit="256"]
#![deny(
dead_code,
non_camel_case_types,
non_snake_case,
path_statements,
trivial_numeric_casts,
unstable_features,
unused_allocation,
unused_import_braces,
unused_imports,
unused_must_use,
unused_mut,
unused_qualifications,
while_true,
)]
extern crate itertools;
#[macro_use] extern crate log;
extern crate toml;
extern crate toml_query;
extern crate url;
extern crate sha1;
extern crate hex;
extern crate serde;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate anyhow;
#[macro_use] extern crate is_match; |
#[macro_use] extern crate libimagstore;
extern crate libimagerror;
extern crate libimagutil;
module_entry_path_mod!("links");
pub mod iter;
pub mod linkable;
pub mod link;
pub mod storecheck; |
#[cfg(test)]
extern crate env_logger; | random_line_split |
types.pre.rs | use std::{fmt, str};
#[derive(Debug)]
pub struct Machine<'a> {
pub memory: CambridgeArray<'a, u8>,
pub output: UTF8Wrapper<'a>,
#ifdef PROFILE
pub trace: ProfileShim,
#endif
}
pub struct CambridgeArray<'a, T: 'a>(pub &'a [T]); // Cambridge is Oxford's rival
pub struct UTF8Wrapper<'a>(pub &'a [u8]);
#ifdef PROFILE
pub struct | (pub fn() -> Profile);
#endif
impl<'a, T: fmt::Display> fmt::Debug for CambridgeArray<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "["));
if self.0.len() > 0 {
for e in &self.0[..] {
try!(write!(f, " {}", e));
}
}
write!(f, " ]")
}
}
impl<'a> fmt::Debug for UTF8Wrapper<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "\n{}", try!(str::from_utf8(self.0).map_err(|_| fmt::Error)))
}
}
#ifdef PROFILE
#[derive(Debug, Default)]
pub struct Profile {
pub instructions: u32,
pub increments: u32, pub decrements: u32, pub overflows: u32, pub underflows: u32,
pub lefts: u32, pub rights: u32, pub left_grows: u32, pub right_grows: u32,
pub ins: u32, pub in_revconvs: u32, pub in_unaries: u32, pub eofs: u32,
pub outs: u32, pub out_revs: u32,
pub loops: u32, pub clears: u32,
pub noops: u32,
}
impl fmt::Debug for ProfileShim {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.0(), f)
}
}
#endif
| ProfileShim | identifier_name |
types.pre.rs | use std::{fmt, str};
#[derive(Debug)]
pub struct Machine<'a> {
pub memory: CambridgeArray<'a, u8>,
pub output: UTF8Wrapper<'a>,
#ifdef PROFILE
pub trace: ProfileShim,
#endif
}
pub struct CambridgeArray<'a, T: 'a>(pub &'a [T]); // Cambridge is Oxford's rival
pub struct UTF8Wrapper<'a>(pub &'a [u8]);
#ifdef PROFILE
pub struct ProfileShim(pub fn() -> Profile);
#endif
impl<'a, T: fmt::Display> fmt::Debug for CambridgeArray<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "["));
if self.0.len() > 0 |
write!(f, " ]")
}
}
impl<'a> fmt::Debug for UTF8Wrapper<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "\n{}", try!(str::from_utf8(self.0).map_err(|_| fmt::Error)))
}
}
#ifdef PROFILE
#[derive(Debug, Default)]
pub struct Profile {
pub instructions: u32,
pub increments: u32, pub decrements: u32, pub overflows: u32, pub underflows: u32,
pub lefts: u32, pub rights: u32, pub left_grows: u32, pub right_grows: u32,
pub ins: u32, pub in_revconvs: u32, pub in_unaries: u32, pub eofs: u32,
pub outs: u32, pub out_revs: u32,
pub loops: u32, pub clears: u32,
pub noops: u32,
}
impl fmt::Debug for ProfileShim {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.0(), f)
}
}
#endif
| {
for e in &self.0[..] {
try!(write!(f, " {}", e));
}
} | conditional_block |
types.pre.rs | use std::{fmt, str};
#[derive(Debug)]
pub struct Machine<'a> {
pub memory: CambridgeArray<'a, u8>,
pub output: UTF8Wrapper<'a>,
#ifdef PROFILE
pub trace: ProfileShim,
#endif
}
pub struct CambridgeArray<'a, T: 'a>(pub &'a [T]); // Cambridge is Oxford's rival
pub struct UTF8Wrapper<'a>(pub &'a [u8]);
#ifdef PROFILE
pub struct ProfileShim(pub fn() -> Profile);
#endif
impl<'a, T: fmt::Display> fmt::Debug for CambridgeArray<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "["));
if self.0.len() > 0 {
for e in &self.0[..] {
try!(write!(f, " {}", e));
}
}
write!(f, " ]")
}
}
impl<'a> fmt::Debug for UTF8Wrapper<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
#ifdef PROFILE
#[derive(Debug, Default)]
pub struct Profile {
pub instructions: u32,
pub increments: u32, pub decrements: u32, pub overflows: u32, pub underflows: u32,
pub lefts: u32, pub rights: u32, pub left_grows: u32, pub right_grows: u32,
pub ins: u32, pub in_revconvs: u32, pub in_unaries: u32, pub eofs: u32,
pub outs: u32, pub out_revs: u32,
pub loops: u32, pub clears: u32,
pub noops: u32,
}
impl fmt::Debug for ProfileShim {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.0(), f)
}
}
#endif
| {
write!(f, "\n{}", try!(str::from_utf8(self.0).map_err(|_| fmt::Error)))
} | identifier_body |
types.pre.rs | use std::{fmt, str};
#[derive(Debug)]
pub struct Machine<'a> {
pub memory: CambridgeArray<'a, u8>,
pub output: UTF8Wrapper<'a>,
#ifdef PROFILE
pub trace: ProfileShim,
#endif
}
| #ifdef PROFILE
pub struct ProfileShim(pub fn() -> Profile);
#endif
impl<'a, T: fmt::Display> fmt::Debug for CambridgeArray<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "["));
if self.0.len() > 0 {
for e in &self.0[..] {
try!(write!(f, " {}", e));
}
}
write!(f, " ]")
}
}
impl<'a> fmt::Debug for UTF8Wrapper<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "\n{}", try!(str::from_utf8(self.0).map_err(|_| fmt::Error)))
}
}
#ifdef PROFILE
#[derive(Debug, Default)]
pub struct Profile {
pub instructions: u32,
pub increments: u32, pub decrements: u32, pub overflows: u32, pub underflows: u32,
pub lefts: u32, pub rights: u32, pub left_grows: u32, pub right_grows: u32,
pub ins: u32, pub in_revconvs: u32, pub in_unaries: u32, pub eofs: u32,
pub outs: u32, pub out_revs: u32,
pub loops: u32, pub clears: u32,
pub noops: u32,
}
impl fmt::Debug for ProfileShim {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.0(), f)
}
}
#endif | pub struct CambridgeArray<'a, T: 'a>(pub &'a [T]); // Cambridge is Oxford's rival
pub struct UTF8Wrapper<'a>(pub &'a [u8]); | random_line_split |
interface.rs | // Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use tokenizer::states;
use std::borrow::Cow;
use string_cache::{Atom, QualName};
use tendril::StrTendril;
pub use self::TagKind::{StartTag, EndTag};
pub use self::Token::{DoctypeToken, TagToken, CommentToken, CharacterTokens};
pub use self::Token::{NullCharacterToken, EOFToken, ParseError};
/// A `DOCTYPE` token.
// FIXME: already exists in Servo DOM
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Doctype {
pub name: Option<StrTendril>,
pub public_id: Option<StrTendril>,
pub system_id: Option<StrTendril>,
pub force_quirks: bool,
}
impl Doctype {
pub fn new() -> Doctype {
Doctype {
name: None,
public_id: None,
system_id: None,
force_quirks: false,
}
}
}
/// A tag attribute.
///
/// The namespace on the attribute name is almost always ns!("").
/// The tokenizer creates all attributes this way, but the tree
/// builder will adjust certain attribute names inside foreign
/// content (MathML, SVG).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct Attribute {
pub name: QualName,
pub value: StrTendril,
}
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum TagKind {
StartTag,
EndTag,
}
/// A tag token.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Tag {
pub kind: TagKind,
pub name: Atom,
pub self_closing: bool,
pub attrs: Vec<Attribute>,
}
impl Tag {
/// Are the tags equivalent when we don't care about attribute order?
/// Also ignores the self-closing flag.
pub fn equiv_modulo_attr_order(&self, other: &Tag) -> bool |
}
#[derive(PartialEq, Eq, Debug)]
pub enum Token {
DoctypeToken(Doctype),
TagToken(Tag),
CommentToken(StrTendril),
CharacterTokens(StrTendril),
NullCharacterToken,
EOFToken,
ParseError(Cow<'static, str>),
}
// FIXME: rust-lang/rust#22629
unsafe impl Send for Token { }
/// Types which can receive tokens from the tokenizer.
pub trait TokenSink {
/// Process a token.
fn process_token(&mut self, token: Token);
/// Used in the markup declaration open state. By default, this always
/// returns false and thus all CDATA sections are tokenized as bogus
/// comments.
/// https://html.spec.whatwg.org/multipage/#markup-declaration-open-state
fn adjusted_current_node_present_but_not_in_html_namespace(&self) -> bool {
false
}
/// The tokenizer will call this after emitting any tag.
/// This allows the tree builder to change the tokenizer's state.
/// By default no state changes occur.
fn query_state_change(&mut self) -> Option<states::State> {
None
}
}
| {
if (self.kind != other.kind) || (self.name != other.name) {
return false;
}
let mut self_attrs = self.attrs.clone();
let mut other_attrs = other.attrs.clone();
self_attrs.sort();
other_attrs.sort();
self_attrs == other_attrs
} | identifier_body |
interface.rs | // Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use tokenizer::states;
use std::borrow::Cow;
use string_cache::{Atom, QualName};
use tendril::StrTendril;
pub use self::TagKind::{StartTag, EndTag};
pub use self::Token::{DoctypeToken, TagToken, CommentToken, CharacterTokens};
pub use self::Token::{NullCharacterToken, EOFToken, ParseError};
/// A `DOCTYPE` token.
// FIXME: already exists in Servo DOM
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Doctype {
pub name: Option<StrTendril>,
pub public_id: Option<StrTendril>,
pub system_id: Option<StrTendril>,
pub force_quirks: bool,
}
impl Doctype {
pub fn new() -> Doctype {
Doctype {
name: None,
public_id: None,
system_id: None,
force_quirks: false,
}
}
}
/// A tag attribute.
///
/// The namespace on the attribute name is almost always ns!("").
/// The tokenizer creates all attributes this way, but the tree
/// builder will adjust certain attribute names inside foreign
/// content (MathML, SVG).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct Attribute {
pub name: QualName,
pub value: StrTendril,
}
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum TagKind {
StartTag,
EndTag,
}
/// A tag token.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Tag {
pub kind: TagKind,
pub name: Atom,
pub self_closing: bool,
pub attrs: Vec<Attribute>,
}
impl Tag {
/// Are the tags equivalent when we don't care about attribute order?
/// Also ignores the self-closing flag.
pub fn | (&self, other: &Tag) -> bool {
if (self.kind!= other.kind) || (self.name!= other.name) {
return false;
}
let mut self_attrs = self.attrs.clone();
let mut other_attrs = other.attrs.clone();
self_attrs.sort();
other_attrs.sort();
self_attrs == other_attrs
}
}
#[derive(PartialEq, Eq, Debug)]
pub enum Token {
DoctypeToken(Doctype),
TagToken(Tag),
CommentToken(StrTendril),
CharacterTokens(StrTendril),
NullCharacterToken,
EOFToken,
ParseError(Cow<'static, str>),
}
// FIXME: rust-lang/rust#22629
unsafe impl Send for Token { }
/// Types which can receive tokens from the tokenizer.
pub trait TokenSink {
/// Process a token.
fn process_token(&mut self, token: Token);
/// Used in the markup declaration open state. By default, this always
/// returns false and thus all CDATA sections are tokenized as bogus
/// comments.
/// https://html.spec.whatwg.org/multipage/#markup-declaration-open-state
fn adjusted_current_node_present_but_not_in_html_namespace(&self) -> bool {
false
}
/// The tokenizer will call this after emitting any tag.
/// This allows the tree builder to change the tokenizer's state.
/// By default no state changes occur.
fn query_state_change(&mut self) -> Option<states::State> {
None
}
}
| equiv_modulo_attr_order | identifier_name |
interface.rs | // Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use tokenizer::states;
use std::borrow::Cow;
use string_cache::{Atom, QualName};
use tendril::StrTendril;
pub use self::TagKind::{StartTag, EndTag};
pub use self::Token::{DoctypeToken, TagToken, CommentToken, CharacterTokens};
pub use self::Token::{NullCharacterToken, EOFToken, ParseError};
/// A `DOCTYPE` token.
// FIXME: already exists in Servo DOM
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Doctype {
pub name: Option<StrTendril>,
pub public_id: Option<StrTendril>,
pub system_id: Option<StrTendril>,
pub force_quirks: bool,
}
impl Doctype {
pub fn new() -> Doctype {
Doctype {
name: None,
public_id: None,
system_id: None,
force_quirks: false,
}
}
}
/// A tag attribute.
///
/// The namespace on the attribute name is almost always ns!("").
/// The tokenizer creates all attributes this way, but the tree
/// builder will adjust certain attribute names inside foreign
/// content (MathML, SVG).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct Attribute {
pub name: QualName,
pub value: StrTendril,
}
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum TagKind {
StartTag,
EndTag,
}
/// A tag token.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Tag {
pub kind: TagKind,
pub name: Atom,
pub self_closing: bool,
pub attrs: Vec<Attribute>,
}
impl Tag {
/// Are the tags equivalent when we don't care about attribute order?
/// Also ignores the self-closing flag.
pub fn equiv_modulo_attr_order(&self, other: &Tag) -> bool {
if (self.kind!= other.kind) || (self.name!= other.name) {
return false;
}
let mut self_attrs = self.attrs.clone();
let mut other_attrs = other.attrs.clone();
self_attrs.sort();
other_attrs.sort();
self_attrs == other_attrs
}
}
#[derive(PartialEq, Eq, Debug)]
pub enum Token {
DoctypeToken(Doctype),
TagToken(Tag),
CommentToken(StrTendril),
CharacterTokens(StrTendril),
NullCharacterToken,
EOFToken,
ParseError(Cow<'static, str>),
}
// FIXME: rust-lang/rust#22629
unsafe impl Send for Token { }
/// Types which can receive tokens from the tokenizer.
pub trait TokenSink {
/// Process a token.
fn process_token(&mut self, token: Token);
/// Used in the markup declaration open state. By default, this always
/// returns false and thus all CDATA sections are tokenized as bogus
/// comments.
/// https://html.spec.whatwg.org/multipage/#markup-declaration-open-state
fn adjusted_current_node_present_but_not_in_html_namespace(&self) -> bool {
false
} | None
}
} |
/// The tokenizer will call this after emitting any tag.
/// This allows the tree builder to change the tokenizer's state.
/// By default no state changes occur.
fn query_state_change(&mut self) -> Option<states::State> { | random_line_split |
interface.rs | // Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use tokenizer::states;
use std::borrow::Cow;
use string_cache::{Atom, QualName};
use tendril::StrTendril;
pub use self::TagKind::{StartTag, EndTag};
pub use self::Token::{DoctypeToken, TagToken, CommentToken, CharacterTokens};
pub use self::Token::{NullCharacterToken, EOFToken, ParseError};
/// A `DOCTYPE` token.
// FIXME: already exists in Servo DOM
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Doctype {
pub name: Option<StrTendril>,
pub public_id: Option<StrTendril>,
pub system_id: Option<StrTendril>,
pub force_quirks: bool,
}
impl Doctype {
pub fn new() -> Doctype {
Doctype {
name: None,
public_id: None,
system_id: None,
force_quirks: false,
}
}
}
/// A tag attribute.
///
/// The namespace on the attribute name is almost always ns!("").
/// The tokenizer creates all attributes this way, but the tree
/// builder will adjust certain attribute names inside foreign
/// content (MathML, SVG).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct Attribute {
pub name: QualName,
pub value: StrTendril,
}
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum TagKind {
StartTag,
EndTag,
}
/// A tag token.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Tag {
pub kind: TagKind,
pub name: Atom,
pub self_closing: bool,
pub attrs: Vec<Attribute>,
}
impl Tag {
/// Are the tags equivalent when we don't care about attribute order?
/// Also ignores the self-closing flag.
pub fn equiv_modulo_attr_order(&self, other: &Tag) -> bool {
if (self.kind!= other.kind) || (self.name!= other.name) |
let mut self_attrs = self.attrs.clone();
let mut other_attrs = other.attrs.clone();
self_attrs.sort();
other_attrs.sort();
self_attrs == other_attrs
}
}
#[derive(PartialEq, Eq, Debug)]
pub enum Token {
DoctypeToken(Doctype),
TagToken(Tag),
CommentToken(StrTendril),
CharacterTokens(StrTendril),
NullCharacterToken,
EOFToken,
ParseError(Cow<'static, str>),
}
// FIXME: rust-lang/rust#22629
unsafe impl Send for Token { }
/// Types which can receive tokens from the tokenizer.
pub trait TokenSink {
/// Process a token.
fn process_token(&mut self, token: Token);
/// Used in the markup declaration open state. By default, this always
/// returns false and thus all CDATA sections are tokenized as bogus
/// comments.
/// https://html.spec.whatwg.org/multipage/#markup-declaration-open-state
fn adjusted_current_node_present_but_not_in_html_namespace(&self) -> bool {
false
}
/// The tokenizer will call this after emitting any tag.
/// This allows the tree builder to change the tokenizer's state.
/// By default no state changes occur.
fn query_state_change(&mut self) -> Option<states::State> {
None
}
}
| {
return false;
} | conditional_block |
filemanager.rs | use std::path::Path;
use crate::channel::*;
use crate::threadpool::*;
use crossbeam_channel::*;
pub enum FileManagerRequests {
ReadAll {
file: String,
sender: Sender<Message>,
},
}
pub struct ReadAllResult {}
pub enum FileManagerRequestsResponses {
Ok,
ReadAllResult(ReadAllResult),
}
pub struct FileManager<'a> {
epoll: libc::c_int,
dispatcher: TypedThreadDispatcher<'a, FileManagerRequests, FileManagerRequestsResponses>,
}
pub struct TempFile {
pub path: std::path::PathBuf,
}
impl TempFile {
pub fn all_equal(path: &str, data: u8) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut f = std::fs::File::create(&path)?;
let data = vec![data; 16 * 1024];
f.write_all(&data)?;
f.sync_all()?;
Ok(Self { path: path.into() })
}
pub fn with_callback<F>(path: &str, f: F) -> std::io::Result<Self>
where
F: Fn(usize) -> u8,
{
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 16 * 1024];
for (i, v) in data.iter_mut().enumerate() {
*v = f(i);
}
file.write_all(&data)?;
file.sync_all()?;
Ok(Self { path: path.into() })
}
pub fn random(path: &str, mut size: usize) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 4 * 1024];
while size > 0 {
for v in (4 * 1024).min(0) {
*v = rand::random::<u8>()
}
size -= data.len();
file.write_all(&data)?;
}
file.sync_all()?;
Ok(Self { path: path.into() })
}
}
impl Drop for TempFile {
fn drop(&mut self) {
let _ = std::fs::remove_file(self.path.as_path());
}
}
pub fn handle_read_all<P: AsRef<Path>>(file: P, sender: &Sender<Message>) -> ReadAllResult {
let mut path = file.as_ref().to_str().unwrap().to_string();
path.push('\0');
let fd = {
let r = unsafe {
libc::open(
path.as_ptr() as *const i8,
libc::O_RDONLY, /*| libc::O_NONBLOCK*/
)
};
if r < 0 {
let err = errno::errno();
eprintln!("{}", err);
}
// let flags = unsafe { libc::fcntl(r, libc::F_GETFL, 0) };
// let _rcontrol = unsafe { libc::fcntl(r, libc::F_SETFL, flags | libc::O_NONBLOCK) };
r
};
// let _r = unsafe {
// libc::posix_fadvise(fd, 0, 0, libc::POSIX_FADV_NORMAL | libc::POSIX_FADV_NOREUSE)
// };
let mut offset = 0;
loop {
let mut buffer = Buffer::all_zero(4 * 1024);
buffer.size = unsafe {
let r = libc::pread(
fd,
buffer.data.as_mut_ptr() as *mut libc::c_void,
buffer.size,
offset,
);
if r == 0 {
break;
}
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); // TODO
break;
}
r as usize
};
offset += buffer.size as i64;
let _ = sender.send(Message::Buffer(buffer));
}
let _ = sender.send(Message::Eof);
unsafe { libc::close(fd) };
ReadAllResult {}
}
impl<'a> FileManager<'a> {
pub fn new(pool: &mut Threadpool<'a>) -> Self {
let dispatcher = pool.new_dispatcher(move |request| match request {
FileManagerRequests::ReadAll { file, sender } => {
FileManagerRequestsResponses::ReadAllResult(handle_read_all(file, sender))
}
});
let epoll = {
let r = unsafe { libc::epoll_create1(0) };
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); //TODO
}
r
};
Self { dispatcher, epoll }
}
fn send(&mut self, req: FileManagerRequests) -> RunResult<FileManagerRequestsResponses> {
self.dispatcher.send(req)
}
pub fn read_all(
&mut self,
file: &str,
sender: Sender<Message>,
) -> std::result::Result<
ReceiverFutureMap<FileManagerRequestsResponses, ReadAllResult>,
ThreadpoolRunError,
> {
let future = self
.send(FileManagerRequests::ReadAll {
file: file.to_string(),
sender,
})?
.map(|x| {
if let FileManagerRequestsResponses::ReadAllResult(r) = x {
r | }
});
Ok(future)
}
}
impl<'a> Drop for FileManager<'a> {
fn drop(&mut self) {
if self.epoll > 0 {
unsafe { libc::close(self.epoll) };
}
}
}
#[cfg(test)]
mod tests {
use crate::threadpool::Threadpool;
use crossbeam_channel::*;
#[test]
fn read_all() {
let file =
super::TempFile::all_equal(".test.read_all", 1).expect("Cannot create temo file");
let mut pool = Threadpool::with_qty(1).expect("Cannot create Threadpool");
let mut mgr = super::FileManager::new(&mut pool);
let (sender, receiver) = bounded(4);
let readl_all_result = mgr
.read_all(file.path.to_str().unwrap(), 1, sender)
.expect("Cannot read file");
for _ in 0..4 {
if let Ok(crate::channel::Message::Buffer(buffer, next)) =
receiver.recv_timeout(std::time::Duration::from_secs(1))
{
testlib::assert!(next == 1);
testlib::assert!(buffer.data.len() == 4096);
testlib::assert!(buffer.data.iter().all(|x| *x == 1u8));
}
}
readl_all_result
.wait(std::time::Duration::from_secs(1))
.expect("Read all timeout");
}
} | } else {
panic!("unexpected result") | random_line_split |
filemanager.rs | use std::path::Path;
use crate::channel::*;
use crate::threadpool::*;
use crossbeam_channel::*;
pub enum FileManagerRequests {
ReadAll {
file: String,
sender: Sender<Message>,
},
}
pub struct ReadAllResult {}
pub enum | {
Ok,
ReadAllResult(ReadAllResult),
}
pub struct FileManager<'a> {
epoll: libc::c_int,
dispatcher: TypedThreadDispatcher<'a, FileManagerRequests, FileManagerRequestsResponses>,
}
pub struct TempFile {
pub path: std::path::PathBuf,
}
impl TempFile {
pub fn all_equal(path: &str, data: u8) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut f = std::fs::File::create(&path)?;
let data = vec![data; 16 * 1024];
f.write_all(&data)?;
f.sync_all()?;
Ok(Self { path: path.into() })
}
pub fn with_callback<F>(path: &str, f: F) -> std::io::Result<Self>
where
F: Fn(usize) -> u8,
{
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 16 * 1024];
for (i, v) in data.iter_mut().enumerate() {
*v = f(i);
}
file.write_all(&data)?;
file.sync_all()?;
Ok(Self { path: path.into() })
}
pub fn random(path: &str, mut size: usize) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 4 * 1024];
while size > 0 {
for v in (4 * 1024).min(0) {
*v = rand::random::<u8>()
}
size -= data.len();
file.write_all(&data)?;
}
file.sync_all()?;
Ok(Self { path: path.into() })
}
}
impl Drop for TempFile {
fn drop(&mut self) {
let _ = std::fs::remove_file(self.path.as_path());
}
}
pub fn handle_read_all<P: AsRef<Path>>(file: P, sender: &Sender<Message>) -> ReadAllResult {
let mut path = file.as_ref().to_str().unwrap().to_string();
path.push('\0');
let fd = {
let r = unsafe {
libc::open(
path.as_ptr() as *const i8,
libc::O_RDONLY, /*| libc::O_NONBLOCK*/
)
};
if r < 0 {
let err = errno::errno();
eprintln!("{}", err);
}
// let flags = unsafe { libc::fcntl(r, libc::F_GETFL, 0) };
// let _rcontrol = unsafe { libc::fcntl(r, libc::F_SETFL, flags | libc::O_NONBLOCK) };
r
};
// let _r = unsafe {
// libc::posix_fadvise(fd, 0, 0, libc::POSIX_FADV_NORMAL | libc::POSIX_FADV_NOREUSE)
// };
let mut offset = 0;
loop {
let mut buffer = Buffer::all_zero(4 * 1024);
buffer.size = unsafe {
let r = libc::pread(
fd,
buffer.data.as_mut_ptr() as *mut libc::c_void,
buffer.size,
offset,
);
if r == 0 {
break;
}
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); // TODO
break;
}
r as usize
};
offset += buffer.size as i64;
let _ = sender.send(Message::Buffer(buffer));
}
let _ = sender.send(Message::Eof);
unsafe { libc::close(fd) };
ReadAllResult {}
}
impl<'a> FileManager<'a> {
pub fn new(pool: &mut Threadpool<'a>) -> Self {
let dispatcher = pool.new_dispatcher(move |request| match request {
FileManagerRequests::ReadAll { file, sender } => {
FileManagerRequestsResponses::ReadAllResult(handle_read_all(file, sender))
}
});
let epoll = {
let r = unsafe { libc::epoll_create1(0) };
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); //TODO
}
r
};
Self { dispatcher, epoll }
}
fn send(&mut self, req: FileManagerRequests) -> RunResult<FileManagerRequestsResponses> {
self.dispatcher.send(req)
}
pub fn read_all(
&mut self,
file: &str,
sender: Sender<Message>,
) -> std::result::Result<
ReceiverFutureMap<FileManagerRequestsResponses, ReadAllResult>,
ThreadpoolRunError,
> {
let future = self
.send(FileManagerRequests::ReadAll {
file: file.to_string(),
sender,
})?
.map(|x| {
if let FileManagerRequestsResponses::ReadAllResult(r) = x {
r
} else {
panic!("unexpected result")
}
});
Ok(future)
}
}
impl<'a> Drop for FileManager<'a> {
fn drop(&mut self) {
if self.epoll > 0 {
unsafe { libc::close(self.epoll) };
}
}
}
#[cfg(test)]
mod tests {
use crate::threadpool::Threadpool;
use crossbeam_channel::*;
#[test]
fn read_all() {
let file =
super::TempFile::all_equal(".test.read_all", 1).expect("Cannot create temo file");
let mut pool = Threadpool::with_qty(1).expect("Cannot create Threadpool");
let mut mgr = super::FileManager::new(&mut pool);
let (sender, receiver) = bounded(4);
let readl_all_result = mgr
.read_all(file.path.to_str().unwrap(), 1, sender)
.expect("Cannot read file");
for _ in 0..4 {
if let Ok(crate::channel::Message::Buffer(buffer, next)) =
receiver.recv_timeout(std::time::Duration::from_secs(1))
{
testlib::assert!(next == 1);
testlib::assert!(buffer.data.len() == 4096);
testlib::assert!(buffer.data.iter().all(|x| *x == 1u8));
}
}
readl_all_result
.wait(std::time::Duration::from_secs(1))
.expect("Read all timeout");
}
}
| FileManagerRequestsResponses | identifier_name |
filemanager.rs | use std::path::Path;
use crate::channel::*;
use crate::threadpool::*;
use crossbeam_channel::*;
pub enum FileManagerRequests {
ReadAll {
file: String,
sender: Sender<Message>,
},
}
pub struct ReadAllResult {}
pub enum FileManagerRequestsResponses {
Ok,
ReadAllResult(ReadAllResult),
}
pub struct FileManager<'a> {
epoll: libc::c_int,
dispatcher: TypedThreadDispatcher<'a, FileManagerRequests, FileManagerRequestsResponses>,
}
pub struct TempFile {
pub path: std::path::PathBuf,
}
impl TempFile {
pub fn all_equal(path: &str, data: u8) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut f = std::fs::File::create(&path)?;
let data = vec![data; 16 * 1024];
f.write_all(&data)?;
f.sync_all()?;
Ok(Self { path: path.into() })
}
pub fn with_callback<F>(path: &str, f: F) -> std::io::Result<Self>
where
F: Fn(usize) -> u8,
{
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 16 * 1024];
for (i, v) in data.iter_mut().enumerate() {
*v = f(i);
}
file.write_all(&data)?;
file.sync_all()?;
Ok(Self { path: path.into() })
}
pub fn random(path: &str, mut size: usize) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 4 * 1024];
while size > 0 {
for v in (4 * 1024).min(0) {
*v = rand::random::<u8>()
}
size -= data.len();
file.write_all(&data)?;
}
file.sync_all()?;
Ok(Self { path: path.into() })
}
}
impl Drop for TempFile {
fn drop(&mut self) {
let _ = std::fs::remove_file(self.path.as_path());
}
}
pub fn handle_read_all<P: AsRef<Path>>(file: P, sender: &Sender<Message>) -> ReadAllResult {
let mut path = file.as_ref().to_str().unwrap().to_string();
path.push('\0');
let fd = {
let r = unsafe {
libc::open(
path.as_ptr() as *const i8,
libc::O_RDONLY, /*| libc::O_NONBLOCK*/
)
};
if r < 0 {
let err = errno::errno();
eprintln!("{}", err);
}
// let flags = unsafe { libc::fcntl(r, libc::F_GETFL, 0) };
// let _rcontrol = unsafe { libc::fcntl(r, libc::F_SETFL, flags | libc::O_NONBLOCK) };
r
};
// let _r = unsafe {
// libc::posix_fadvise(fd, 0, 0, libc::POSIX_FADV_NORMAL | libc::POSIX_FADV_NOREUSE)
// };
let mut offset = 0;
loop {
let mut buffer = Buffer::all_zero(4 * 1024);
buffer.size = unsafe {
let r = libc::pread(
fd,
buffer.data.as_mut_ptr() as *mut libc::c_void,
buffer.size,
offset,
);
if r == 0 {
break;
}
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); // TODO
break;
}
r as usize
};
offset += buffer.size as i64;
let _ = sender.send(Message::Buffer(buffer));
}
let _ = sender.send(Message::Eof);
unsafe { libc::close(fd) };
ReadAllResult {}
}
impl<'a> FileManager<'a> {
pub fn new(pool: &mut Threadpool<'a>) -> Self {
let dispatcher = pool.new_dispatcher(move |request| match request {
FileManagerRequests::ReadAll { file, sender } => {
FileManagerRequestsResponses::ReadAllResult(handle_read_all(file, sender))
}
});
let epoll = {
let r = unsafe { libc::epoll_create1(0) };
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); //TODO
}
r
};
Self { dispatcher, epoll }
}
fn send(&mut self, req: FileManagerRequests) -> RunResult<FileManagerRequestsResponses> {
self.dispatcher.send(req)
}
pub fn read_all(
&mut self,
file: &str,
sender: Sender<Message>,
) -> std::result::Result<
ReceiverFutureMap<FileManagerRequestsResponses, ReadAllResult>,
ThreadpoolRunError,
> {
let future = self
.send(FileManagerRequests::ReadAll {
file: file.to_string(),
sender,
})?
.map(|x| {
if let FileManagerRequestsResponses::ReadAllResult(r) = x | else {
panic!("unexpected result")
}
});
Ok(future)
}
}
impl<'a> Drop for FileManager<'a> {
fn drop(&mut self) {
if self.epoll > 0 {
unsafe { libc::close(self.epoll) };
}
}
}
#[cfg(test)]
mod tests {
use crate::threadpool::Threadpool;
use crossbeam_channel::*;
#[test]
fn read_all() {
let file =
super::TempFile::all_equal(".test.read_all", 1).expect("Cannot create temo file");
let mut pool = Threadpool::with_qty(1).expect("Cannot create Threadpool");
let mut mgr = super::FileManager::new(&mut pool);
let (sender, receiver) = bounded(4);
let readl_all_result = mgr
.read_all(file.path.to_str().unwrap(), 1, sender)
.expect("Cannot read file");
for _ in 0..4 {
if let Ok(crate::channel::Message::Buffer(buffer, next)) =
receiver.recv_timeout(std::time::Duration::from_secs(1))
{
testlib::assert!(next == 1);
testlib::assert!(buffer.data.len() == 4096);
testlib::assert!(buffer.data.iter().all(|x| *x == 1u8));
}
}
readl_all_result
.wait(std::time::Duration::from_secs(1))
.expect("Read all timeout");
}
}
| {
r
} | conditional_block |
filemanager.rs | use std::path::Path;
use crate::channel::*;
use crate::threadpool::*;
use crossbeam_channel::*;
pub enum FileManagerRequests {
ReadAll {
file: String,
sender: Sender<Message>,
},
}
pub struct ReadAllResult {}
pub enum FileManagerRequestsResponses {
Ok,
ReadAllResult(ReadAllResult),
}
pub struct FileManager<'a> {
epoll: libc::c_int,
dispatcher: TypedThreadDispatcher<'a, FileManagerRequests, FileManagerRequestsResponses>,
}
pub struct TempFile {
pub path: std::path::PathBuf,
}
impl TempFile {
pub fn all_equal(path: &str, data: u8) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut f = std::fs::File::create(&path)?;
let data = vec![data; 16 * 1024];
f.write_all(&data)?;
f.sync_all()?;
Ok(Self { path: path.into() })
}
pub fn with_callback<F>(path: &str, f: F) -> std::io::Result<Self>
where
F: Fn(usize) -> u8,
|
pub fn random(path: &str, mut size: usize) -> std::io::Result<Self> {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 4 * 1024];
while size > 0 {
for v in (4 * 1024).min(0) {
*v = rand::random::<u8>()
}
size -= data.len();
file.write_all(&data)?;
}
file.sync_all()?;
Ok(Self { path: path.into() })
}
}
impl Drop for TempFile {
fn drop(&mut self) {
let _ = std::fs::remove_file(self.path.as_path());
}
}
pub fn handle_read_all<P: AsRef<Path>>(file: P, sender: &Sender<Message>) -> ReadAllResult {
let mut path = file.as_ref().to_str().unwrap().to_string();
path.push('\0');
let fd = {
let r = unsafe {
libc::open(
path.as_ptr() as *const i8,
libc::O_RDONLY, /*| libc::O_NONBLOCK*/
)
};
if r < 0 {
let err = errno::errno();
eprintln!("{}", err);
}
// let flags = unsafe { libc::fcntl(r, libc::F_GETFL, 0) };
// let _rcontrol = unsafe { libc::fcntl(r, libc::F_SETFL, flags | libc::O_NONBLOCK) };
r
};
// let _r = unsafe {
// libc::posix_fadvise(fd, 0, 0, libc::POSIX_FADV_NORMAL | libc::POSIX_FADV_NOREUSE)
// };
let mut offset = 0;
loop {
let mut buffer = Buffer::all_zero(4 * 1024);
buffer.size = unsafe {
let r = libc::pread(
fd,
buffer.data.as_mut_ptr() as *mut libc::c_void,
buffer.size,
offset,
);
if r == 0 {
break;
}
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); // TODO
break;
}
r as usize
};
offset += buffer.size as i64;
let _ = sender.send(Message::Buffer(buffer));
}
let _ = sender.send(Message::Eof);
unsafe { libc::close(fd) };
ReadAllResult {}
}
impl<'a> FileManager<'a> {
pub fn new(pool: &mut Threadpool<'a>) -> Self {
let dispatcher = pool.new_dispatcher(move |request| match request {
FileManagerRequests::ReadAll { file, sender } => {
FileManagerRequestsResponses::ReadAllResult(handle_read_all(file, sender))
}
});
let epoll = {
let r = unsafe { libc::epoll_create1(0) };
if r < 0 {
let err = errno::errno();
eprintln!("{}", err); //TODO
}
r
};
Self { dispatcher, epoll }
}
fn send(&mut self, req: FileManagerRequests) -> RunResult<FileManagerRequestsResponses> {
self.dispatcher.send(req)
}
pub fn read_all(
&mut self,
file: &str,
sender: Sender<Message>,
) -> std::result::Result<
ReceiverFutureMap<FileManagerRequestsResponses, ReadAllResult>,
ThreadpoolRunError,
> {
let future = self
.send(FileManagerRequests::ReadAll {
file: file.to_string(),
sender,
})?
.map(|x| {
if let FileManagerRequestsResponses::ReadAllResult(r) = x {
r
} else {
panic!("unexpected result")
}
});
Ok(future)
}
}
impl<'a> Drop for FileManager<'a> {
fn drop(&mut self) {
if self.epoll > 0 {
unsafe { libc::close(self.epoll) };
}
}
}
#[cfg(test)]
mod tests {
use crate::threadpool::Threadpool;
use crossbeam_channel::*;
#[test]
fn read_all() {
let file =
super::TempFile::all_equal(".test.read_all", 1).expect("Cannot create temo file");
let mut pool = Threadpool::with_qty(1).expect("Cannot create Threadpool");
let mut mgr = super::FileManager::new(&mut pool);
let (sender, receiver) = bounded(4);
let readl_all_result = mgr
.read_all(file.path.to_str().unwrap(), 1, sender)
.expect("Cannot read file");
for _ in 0..4 {
if let Ok(crate::channel::Message::Buffer(buffer, next)) =
receiver.recv_timeout(std::time::Duration::from_secs(1))
{
testlib::assert!(next == 1);
testlib::assert!(buffer.data.len() == 4096);
testlib::assert!(buffer.data.iter().all(|x| *x == 1u8));
}
}
readl_all_result
.wait(std::time::Duration::from_secs(1))
.expect("Read all timeout");
}
}
| {
use std::io::*;
let _ = std::fs::remove_file(&path);
let mut file = std::fs::File::create(&path)?;
let mut data = vec![0u8; 16 * 1024];
for (i, v) in data.iter_mut().enumerate() {
*v = f(i);
}
file.write_all(&data)?;
file.sync_all()?;
Ok(Self { path: path.into() })
} | identifier_body |
macros.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | => ( ::syscall::syscall0(
::syscall::nr::$nr) );
($nr:ident, $a1:expr)
=> ( ::syscall::syscall1(
::syscall::nr::$nr,
$a1 as usize) );
($nr:ident, $a1:expr, $a2:expr)
=> ( ::syscall::syscall2(
::syscall::nr::$nr,
$a1 as usize, $a2 as usize) );
($nr:ident, $a1:expr, $a2:expr, $a3:expr)
=> ( ::syscall::syscall3(
::syscall::nr::$nr,
$a1 as usize, $a2 as usize, $a3 as usize) );
($nr:ident, $a1:expr, $a2:expr, $a3:expr, $a4:expr)
=> ( ::syscall::syscall4(
::syscall::nr::$nr,
$a1 as usize, $a2 as usize, $a3 as usize,
$a4 as usize) );
($nr:ident, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr)
=> ( ::syscall::syscall5(
::syscall::nr::$nr,
$a1 as usize, $a2 as usize, $a3 as usize,
$a4 as usize, $a5 as usize) );
($nr:ident, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr, $a6:expr)
=> ( ::syscall::syscall6(
::syscall::nr::$nr,
$a1 as usize, $a2 as usize, $a3 as usize,
$a4 as usize, $a5 as usize, $a6 as usize) );
} |
#[macro_export]
macro_rules! syscall {
($nr:ident) | random_line_split |
default_impl.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::HashMap;
use std::collections::HashSet;
use std::future::Future;
use std::sync::Arc;
use futures::future::BoxFuture;
use futures::FutureExt;
use futures::StreamExt;
use futures::TryStreamExt;
use crate::namedag::MemNameDag;
use crate::nameset::hints::Hints;
use crate::ops::DagAddHeads;
use crate::ops::IdConvert;
use crate::ops::IdDagAlgorithm;
use crate::ops::Parents;
use crate::DagAlgorithm;
use crate::Id;
use crate::IdSet;
use crate::NameSet;
use crate::Result;
use crate::VertexName;
/// Re-create the graph so it looks better when rendered.
///
/// For example, the left-side graph will be rewritten to the right-side:
///
/// 1. Linearize.
///
/// ```plain,ignore
/// A A # Linearize is done by IdMap::assign_heads,
/// | | # as long as the heads provided are the heads
/// | C B # of the whole graph ("A", "C", not "B", "D").
/// | | |
/// B | -> | C
/// | | | |
/// | D | D
/// |/ |/
/// E E
/// ```
///
/// 2. Reorder branches (at different branching points) to reduce columns.
///
/// ```plain,ignore
/// D B
/// | | # Assuming the main branch is B-C-E.
/// B | | A # Branching point of the D branch is "C"
/// | | |/ # Branching point of the A branch is "C"
/// | | A -> C # The D branch should be moved to below
/// | |/ | # the A branch.
/// | | | D
/// |/| |/
/// C / E
/// |/
/// E
/// ```
///
/// 3. Reorder branches (at a same branching point) to reduce length of
/// edges.
///
/// ```plain,ignore
/// D A
/// | | # This is done by picking the longest
/// | A B # branch (A-B-C-E) as the "main branch"
/// | | | # and work on the remaining branches
/// | B -> C # recursively.
/// | | |
/// | C | D
/// |/ |/
/// E E
/// ```
///
/// `main_branch` optionally defines how to sort the heads. A head `x` will
/// be emitted first during iteration, if `ancestors(x) & main_branch`
/// contains larger vertexes. For example, if `main_branch` is `[C, D, E]`,
/// then `C` will be emitted first, and the returned DAG will have `all()`
/// output `[C, D, A, B, E]`. Practically, `main_branch` usually contains
/// "public" commits.
///
/// This function is expensive. Only run on small graphs.
///
/// This function is currently more optimized for "forking" cases. It is
/// not yet optimized for graphs with many merges.
pub(crate) async fn beautify(
this: &(impl DagAlgorithm +?Sized),
main_branch: Option<NameSet>,
) -> Result<MemNameDag> {
// Find the "largest" branch.
async fn find_main_branch<F, O>(get_ancestors: &F, heads: &[VertexName]) -> Result<NameSet>
where
F: Fn(&VertexName) -> O,
F: Send,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let mut best_branch = NameSet::empty();
let mut best_count = best_branch.count().await?;
for head in heads {
let branch = get_ancestors(head).await?;
let count = branch.count().await?;
if count > best_count {
best_count = count;
best_branch = branch;
}
}
Ok(best_branch)
}
// Sort heads recursively.
// Cannot use "async fn" due to rustc limitation on async recursion.
fn sort<'a: 't, 'b: 't, 't, F, O>(
get_ancestors: &'a F,
heads: &'b mut [VertexName],
main_branch: NameSet,
) -> BoxFuture<'t, Result<()>>
where
F: Fn(&VertexName) -> O,
F: Send + Sync,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let fut = async move {
if heads.len() <= 1 {
return Ok(());
}
// Sort heads by "branching point" on the main branch.
let mut branching_points: HashMap<VertexName, usize> =
HashMap::with_capacity(heads.len());
for head in heads.iter() {
let count = (get_ancestors(head).await? & main_branch.clone())
.count()
.await?;
branching_points.insert(head.clone(), count);
}
heads.sort_by_key(|v| branching_points.get(v));
// For heads with a same branching point, sort them recursively
// using a different "main branch".
let mut start = 0;
let mut start_branching_point: Option<usize> = None;
for end in 0..=heads.len() {
let branching_point = heads
.get(end)
.and_then(|h| branching_points.get(&h).cloned());
if branching_point!= start_branching_point {
if start + 1 < end {
let heads = &mut heads[start..end];
let main_branch = find_main_branch(get_ancestors, heads).await?;
// "boxed" is used to workaround async recursion.
sort(get_ancestors, heads, main_branch).boxed().await?;
}
start = end;
start_branching_point = branching_point;
}
}
Ok(())
};
Box::pin(fut)
}
let main_branch = main_branch.unwrap_or_else(NameSet::empty);
let heads = this
.heads_ancestors(this.all().await?)
.await?
.iter()
.await?;
let mut heads: Vec<_> = heads.try_collect().await?;
let get_ancestors = |head: &VertexName| this.ancestors(head.into());
// Stabilize output if the sort key conflicts.
heads.sort();
sort(&get_ancestors, &mut heads[..], main_branch).await?;
let mut dag = MemNameDag::new();
dag.add_heads(&this.dag_snapshot()?, &heads.into()).await?;
Ok(dag)
}
/// Convert `Set` to a `Parents` implementation that only returns vertexes in the set.
pub(crate) async fn set_to_parents(set: &NameSet) -> Result<Option<impl Parents>> {
let (id_set, id_map) = match set.to_id_set_and_id_map_in_o1() {
Some(v) => v,
None => return Ok(None),
};
let dag = match set.dag() {
None => return Ok(None),
Some(dag) => dag,
};
let id_dag = dag.id_dag_snapshot()?;
// Pre-resolve ids to vertexes. Reduce remote lookup round-trips.
let ids: Vec<Id> = id_set.iter_desc().collect();
id_map.vertex_name_batch(&ids).await?;
struct IdParents {
id_set: IdSet,
id_dag: Arc<dyn IdDagAlgorithm + Send + Sync>,
id_map: Arc<dyn IdConvert + Send + Sync>,
}
#[async_trait::async_trait]
impl Parents for IdParents {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
tracing::debug!(
target: "dag::idparents",
"resolving parents for {:?}", &name,
);
let id = self.id_map.vertex_id(name).await?;
let direct_parent_ids = self.id_dag.parent_ids(id)?;
let parent_ids = if direct_parent_ids.iter().all(|&id| self.id_set.contains(id)) {
// Fast path. No "leaked" parents.
direct_parent_ids
} else {
// Slower path.
// PERF: There might be room to optimize (ex. dedicated API like
// reachable_roots).
let parent_id_set = IdSet::from_spans(direct_parent_ids);
let ancestors = self.id_dag.ancestors(parent_id_set)?;
let heads = ancestors.intersection(&self.id_set);
let heads = self.id_dag.heads_ancestors(heads)?;
heads.iter_desc().collect()
};
let vertexes = self.id_map.vertex_name_batch(&parent_ids).await?;
let parents = vertexes.into_iter().collect::<Result<Vec<_>>>()?;
Ok(parents)
}
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// The `IdParents` is not intended to be inserted to other graphs.
tracing::warn!(
target: "dag::idparents",
"IdParents does not implement hint_subdag_for_insertion() for efficient insertion"
);
Ok(MemNameDag::new())
}
}
let parents = IdParents {
id_set,
id_dag,
id_map,
};
Ok(Some(parents))
}
pub(crate) async fn parents(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let mut result: Vec<VertexName> = Vec::new();
let mut iter = set.iter().await?;
// PERF: This is not an efficient async implementation.
while let Some(vertex) = iter.next().await {
let parents = this.parent_names(vertex?).await?;
result.extend(parents);
}
Ok(NameSet::from_static_names(result))
}
pub(crate) async fn first_ancestor_nth(
this: &(impl DagAlgorithm +?Sized),
name: VertexName,
n: u64,
) -> Result<Option<VertexName>> {
let mut vertex = name.clone();
for _ in 0..n {
let parents = this.parent_names(vertex).await?;
if parents.is_empty() {
return Ok(None);
}
vertex = parents[0].clone();
}
Ok(Some(vertex))
}
pub(crate) async fn | (
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let mut to_visit: Vec<VertexName> = {
let mut list = Vec::with_capacity(set.count().await?);
let mut iter = set.iter().await?;
while let Some(next) = iter.next().await {
let vertex = next?;
list.push(vertex);
}
list
};
let mut visited: HashSet<VertexName> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
#[allow(clippy::never_loop)]
if let Some(parent) = this.parent_names(v).await?.into_iter().next() {
if visited.insert(parent.clone()) {
to_visit.push(parent);
}
}
}
let hints = Hints::new_inherit_idmap_dag(set.hints());
let set = NameSet::from_iter(visited.into_iter().map(Ok), hints);
this.sort(&set).await
}
pub(crate) async fn heads(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.parents(set).await?)
}
pub(crate) async fn roots(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.children(set).await?)
}
pub(crate) async fn merges(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let this = this.dag_snapshot()?;
Ok(set.filter(Box::new(move |v: &VertexName| {
let this = this.clone();
Box::pin(async move {
DagAlgorithm::parent_names(&this, v.clone())
.await
.map(|ps| ps.len() >= 2)
})
})))
}
pub(crate) async fn reachable_roots(
this: &(impl DagAlgorithm +?Sized),
roots: NameSet,
heads: NameSet,
) -> Result<NameSet> {
let heads_ancestors = this.ancestors(heads.clone()).await?;
let roots = roots & heads_ancestors.clone(); // Filter out "bogus" roots.
let only = heads_ancestors - this.ancestors(roots.clone()).await?;
Ok(roots.clone() & (heads.clone() | this.parents(only).await?))
}
pub(crate) async fn heads_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
this.heads(this.ancestors(set).await?).await
}
pub(crate) async fn only(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<NameSet> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok(reachable - unreachable)
}
pub(crate) async fn only_both(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<(NameSet, NameSet)> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok((reachable - unreachable.clone(), unreachable))
}
pub(crate) async fn gca_one(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<Option<VertexName>> {
this.gca_all(set)
.await?
.iter()
.await?
.next()
.await
.transpose()
}
pub(crate) async fn gca_all(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
this.heads_ancestors(this.common_ancestors(set).await?)
.await
}
pub(crate) async fn common_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let result = match set.count().await? {
0 => set,
1 => this.ancestors(set).await?,
_ => {
// Try to reduce the size of `set`.
// `common_ancestors(X)` = `common_ancestors(roots(X))`.
let set = this.roots(set).await?;
let mut iter = set.iter().await?;
let mut result = this
.ancestors(NameSet::from(iter.next().await.unwrap()?))
.await?;
while let Some(v) = iter.next().await {
result = result.intersection(&this.ancestors(NameSet::from(v?)).await?);
}
result
}
};
Ok(result)
}
pub(crate) async fn is_ancestor(
this: &(impl DagAlgorithm +?Sized),
ancestor: VertexName,
descendant: VertexName,
) -> Result<bool> {
let mut to_visit = vec![descendant];
let mut visited: HashSet<_> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
if v == ancestor {
return Ok(true);
}
for parent in this.parent_names(v).await? {
if visited.insert(parent.clone()) {
to_visit.push(parent);
}
}
}
Ok(false)
}
#[tracing::instrument(skip(this), level=tracing::Level::DEBUG)]
pub(crate) async fn hint_subdag_for_insertion(
this: &(impl Parents +?Sized),
scope: &NameSet,
heads: &[VertexName],
) -> Result<MemNameDag> {
let count = scope.count().await?;
tracing::trace!("hint_subdag_for_insertion: pending vertexes: {}", count);
// ScopedParents only contains parents within "scope".
struct ScopedParents<'a, P: Parents +?Sized> {
parents: &'a P,
scope: &'a NameSet,
}
#[async_trait::async_trait]
impl<'a, P: Parents +?Sized> Parents for ScopedParents<'a, P> {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
let parents: Vec<VertexName> = self.parents.parent_names(name).await?;
// Filter by scope. We don't need to provide a "correct" parents here.
// It is only used to optimize network fetches, not used to actually insert
// to the graph.
let mut filtered_parents = Vec::with_capacity(parents.len());
for v in parents {
if self.scope.contains(&v).await? {
filtered_parents.push(v)
}
}
Ok(filtered_parents)
}
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// No need to use such a hint (to avoid infinite recursion).
// Pending names should exist in the graph without using remote fetching.
Ok(MemNameDag::new())
}
}
// Insert vertexes in `scope` to `dag`.
let mut dag = MemNameDag::new();
// The MemNameDag should not be lazy.
assert!(!dag.is_vertex_lazy());
let scoped_parents = ScopedParents {
parents: this,
scope,
};
dag.add_heads(&scoped_parents, &heads.into()).await?;
Ok(dag)
}
| first_ancestors | identifier_name |
default_impl.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::HashMap;
use std::collections::HashSet;
use std::future::Future;
use std::sync::Arc;
use futures::future::BoxFuture;
use futures::FutureExt;
use futures::StreamExt;
use futures::TryStreamExt;
use crate::namedag::MemNameDag;
use crate::nameset::hints::Hints;
use crate::ops::DagAddHeads;
use crate::ops::IdConvert;
use crate::ops::IdDagAlgorithm;
use crate::ops::Parents;
use crate::DagAlgorithm;
use crate::Id;
use crate::IdSet;
use crate::NameSet;
use crate::Result;
use crate::VertexName;
/// Re-create the graph so it looks better when rendered.
///
/// For example, the left-side graph will be rewritten to the right-side:
///
/// 1. Linearize.
///
/// ```plain,ignore
/// A A # Linearize is done by IdMap::assign_heads,
/// | | # as long as the heads provided are the heads
/// | C B # of the whole graph ("A", "C", not "B", "D").
/// | | |
/// B | -> | C
/// | | | |
/// | D | D
/// |/ |/
/// E E
/// ```
///
/// 2. Reorder branches (at different branching points) to reduce columns.
///
/// ```plain,ignore
/// D B
/// | | # Assuming the main branch is B-C-E.
/// B | | A # Branching point of the D branch is "C"
/// | | |/ # Branching point of the A branch is "C"
/// | | A -> C # The D branch should be moved to below
/// | |/ | # the A branch.
/// | | | D
/// |/| |/
/// C / E
/// |/
/// E
/// ```
///
/// 3. Reorder branches (at a same branching point) to reduce length of
/// edges.
///
/// ```plain,ignore
/// D A
/// | | # This is done by picking the longest
/// | A B # branch (A-B-C-E) as the "main branch"
/// | | | # and work on the remaining branches
/// | B -> C # recursively.
/// | | |
/// | C | D
/// |/ |/
/// E E
/// ```
///
/// `main_branch` optionally defines how to sort the heads. A head `x` will
/// be emitted first during iteration, if `ancestors(x) & main_branch`
/// contains larger vertexes. For example, if `main_branch` is `[C, D, E]`,
/// then `C` will be emitted first, and the returned DAG will have `all()`
/// output `[C, D, A, B, E]`. Practically, `main_branch` usually contains
/// "public" commits.
///
/// This function is expensive. Only run on small graphs.
///
/// This function is currently more optimized for "forking" cases. It is
/// not yet optimized for graphs with many merges.
pub(crate) async fn beautify(
this: &(impl DagAlgorithm +?Sized),
main_branch: Option<NameSet>,
) -> Result<MemNameDag> {
// Find the "largest" branch.
async fn find_main_branch<F, O>(get_ancestors: &F, heads: &[VertexName]) -> Result<NameSet>
where
F: Fn(&VertexName) -> O,
F: Send,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let mut best_branch = NameSet::empty();
let mut best_count = best_branch.count().await?;
for head in heads {
let branch = get_ancestors(head).await?;
let count = branch.count().await?;
if count > best_count {
best_count = count;
best_branch = branch;
}
}
Ok(best_branch)
}
// Sort heads recursively.
// Cannot use "async fn" due to rustc limitation on async recursion.
fn sort<'a: 't, 'b: 't, 't, F, O>(
get_ancestors: &'a F,
heads: &'b mut [VertexName],
main_branch: NameSet,
) -> BoxFuture<'t, Result<()>>
where
F: Fn(&VertexName) -> O,
F: Send + Sync,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let fut = async move {
if heads.len() <= 1 {
return Ok(());
}
// Sort heads by "branching point" on the main branch.
let mut branching_points: HashMap<VertexName, usize> =
HashMap::with_capacity(heads.len());
for head in heads.iter() {
let count = (get_ancestors(head).await? & main_branch.clone())
.count()
.await?;
branching_points.insert(head.clone(), count);
}
heads.sort_by_key(|v| branching_points.get(v));
// For heads with a same branching point, sort them recursively
// using a different "main branch".
let mut start = 0;
let mut start_branching_point: Option<usize> = None;
for end in 0..=heads.len() {
let branching_point = heads
.get(end)
.and_then(|h| branching_points.get(&h).cloned());
if branching_point!= start_branching_point {
if start + 1 < end {
let heads = &mut heads[start..end];
let main_branch = find_main_branch(get_ancestors, heads).await?;
// "boxed" is used to workaround async recursion.
sort(get_ancestors, heads, main_branch).boxed().await?;
}
start = end;
start_branching_point = branching_point;
}
}
Ok(())
};
Box::pin(fut)
}
let main_branch = main_branch.unwrap_or_else(NameSet::empty);
let heads = this
.heads_ancestors(this.all().await?)
.await?
.iter()
.await?;
let mut heads: Vec<_> = heads.try_collect().await?;
let get_ancestors = |head: &VertexName| this.ancestors(head.into());
// Stabilize output if the sort key conflicts.
heads.sort();
sort(&get_ancestors, &mut heads[..], main_branch).await?;
let mut dag = MemNameDag::new();
dag.add_heads(&this.dag_snapshot()?, &heads.into()).await?;
Ok(dag)
}
/// Convert `Set` to a `Parents` implementation that only returns vertexes in the set.
pub(crate) async fn set_to_parents(set: &NameSet) -> Result<Option<impl Parents>> {
let (id_set, id_map) = match set.to_id_set_and_id_map_in_o1() {
Some(v) => v,
None => return Ok(None),
};
let dag = match set.dag() {
None => return Ok(None),
Some(dag) => dag,
};
let id_dag = dag.id_dag_snapshot()?;
// Pre-resolve ids to vertexes. Reduce remote lookup round-trips.
let ids: Vec<Id> = id_set.iter_desc().collect();
id_map.vertex_name_batch(&ids).await?;
struct IdParents {
id_set: IdSet,
id_dag: Arc<dyn IdDagAlgorithm + Send + Sync>,
id_map: Arc<dyn IdConvert + Send + Sync>,
}
#[async_trait::async_trait]
impl Parents for IdParents {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
tracing::debug!(
target: "dag::idparents",
"resolving parents for {:?}", &name,
);
let id = self.id_map.vertex_id(name).await?;
let direct_parent_ids = self.id_dag.parent_ids(id)?;
let parent_ids = if direct_parent_ids.iter().all(|&id| self.id_set.contains(id)) {
// Fast path. No "leaked" parents.
direct_parent_ids
} else {
// Slower path.
// PERF: There might be room to optimize (ex. dedicated API like
// reachable_roots).
let parent_id_set = IdSet::from_spans(direct_parent_ids);
let ancestors = self.id_dag.ancestors(parent_id_set)?;
let heads = ancestors.intersection(&self.id_set);
let heads = self.id_dag.heads_ancestors(heads)?;
heads.iter_desc().collect()
};
let vertexes = self.id_map.vertex_name_batch(&parent_ids).await?;
let parents = vertexes.into_iter().collect::<Result<Vec<_>>>()?;
Ok(parents)
}
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// The `IdParents` is not intended to be inserted to other graphs.
tracing::warn!(
target: "dag::idparents",
"IdParents does not implement hint_subdag_for_insertion() for efficient insertion"
);
Ok(MemNameDag::new())
}
}
let parents = IdParents {
id_set,
id_dag,
id_map,
};
Ok(Some(parents))
}
pub(crate) async fn parents(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let mut result: Vec<VertexName> = Vec::new();
let mut iter = set.iter().await?;
// PERF: This is not an efficient async implementation.
while let Some(vertex) = iter.next().await {
let parents = this.parent_names(vertex?).await?;
result.extend(parents);
}
Ok(NameSet::from_static_names(result))
}
pub(crate) async fn first_ancestor_nth(
this: &(impl DagAlgorithm +?Sized),
name: VertexName,
n: u64,
) -> Result<Option<VertexName>> {
let mut vertex = name.clone();
for _ in 0..n {
let parents = this.parent_names(vertex).await?;
if parents.is_empty() {
return Ok(None);
}
vertex = parents[0].clone();
}
Ok(Some(vertex))
}
pub(crate) async fn first_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let mut to_visit: Vec<VertexName> = {
let mut list = Vec::with_capacity(set.count().await?);
let mut iter = set.iter().await?;
while let Some(next) = iter.next().await {
let vertex = next?;
list.push(vertex);
}
list
};
let mut visited: HashSet<VertexName> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
#[allow(clippy::never_loop)]
if let Some(parent) = this.parent_names(v).await?.into_iter().next() {
if visited.insert(parent.clone()) |
}
}
let hints = Hints::new_inherit_idmap_dag(set.hints());
let set = NameSet::from_iter(visited.into_iter().map(Ok), hints);
this.sort(&set).await
}
pub(crate) async fn heads(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.parents(set).await?)
}
pub(crate) async fn roots(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.children(set).await?)
}
pub(crate) async fn merges(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let this = this.dag_snapshot()?;
Ok(set.filter(Box::new(move |v: &VertexName| {
let this = this.clone();
Box::pin(async move {
DagAlgorithm::parent_names(&this, v.clone())
.await
.map(|ps| ps.len() >= 2)
})
})))
}
pub(crate) async fn reachable_roots(
this: &(impl DagAlgorithm +?Sized),
roots: NameSet,
heads: NameSet,
) -> Result<NameSet> {
let heads_ancestors = this.ancestors(heads.clone()).await?;
let roots = roots & heads_ancestors.clone(); // Filter out "bogus" roots.
let only = heads_ancestors - this.ancestors(roots.clone()).await?;
Ok(roots.clone() & (heads.clone() | this.parents(only).await?))
}
pub(crate) async fn heads_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
this.heads(this.ancestors(set).await?).await
}
pub(crate) async fn only(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<NameSet> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok(reachable - unreachable)
}
pub(crate) async fn only_both(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<(NameSet, NameSet)> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok((reachable - unreachable.clone(), unreachable))
}
pub(crate) async fn gca_one(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<Option<VertexName>> {
this.gca_all(set)
.await?
.iter()
.await?
.next()
.await
.transpose()
}
pub(crate) async fn gca_all(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
this.heads_ancestors(this.common_ancestors(set).await?)
.await
}
pub(crate) async fn common_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let result = match set.count().await? {
0 => set,
1 => this.ancestors(set).await?,
_ => {
// Try to reduce the size of `set`.
// `common_ancestors(X)` = `common_ancestors(roots(X))`.
let set = this.roots(set).await?;
let mut iter = set.iter().await?;
let mut result = this
.ancestors(NameSet::from(iter.next().await.unwrap()?))
.await?;
while let Some(v) = iter.next().await {
result = result.intersection(&this.ancestors(NameSet::from(v?)).await?);
}
result
}
};
Ok(result)
}
pub(crate) async fn is_ancestor(
this: &(impl DagAlgorithm +?Sized),
ancestor: VertexName,
descendant: VertexName,
) -> Result<bool> {
let mut to_visit = vec![descendant];
let mut visited: HashSet<_> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
if v == ancestor {
return Ok(true);
}
for parent in this.parent_names(v).await? {
if visited.insert(parent.clone()) {
to_visit.push(parent);
}
}
}
Ok(false)
}
#[tracing::instrument(skip(this), level=tracing::Level::DEBUG)]
pub(crate) async fn hint_subdag_for_insertion(
this: &(impl Parents +?Sized),
scope: &NameSet,
heads: &[VertexName],
) -> Result<MemNameDag> {
let count = scope.count().await?;
tracing::trace!("hint_subdag_for_insertion: pending vertexes: {}", count);
// ScopedParents only contains parents within "scope".
struct ScopedParents<'a, P: Parents +?Sized> {
parents: &'a P,
scope: &'a NameSet,
}
#[async_trait::async_trait]
impl<'a, P: Parents +?Sized> Parents for ScopedParents<'a, P> {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
let parents: Vec<VertexName> = self.parents.parent_names(name).await?;
// Filter by scope. We don't need to provide a "correct" parents here.
// It is only used to optimize network fetches, not used to actually insert
// to the graph.
let mut filtered_parents = Vec::with_capacity(parents.len());
for v in parents {
if self.scope.contains(&v).await? {
filtered_parents.push(v)
}
}
Ok(filtered_parents)
}
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// No need to use such a hint (to avoid infinite recursion).
// Pending names should exist in the graph without using remote fetching.
Ok(MemNameDag::new())
}
}
// Insert vertexes in `scope` to `dag`.
let mut dag = MemNameDag::new();
// The MemNameDag should not be lazy.
assert!(!dag.is_vertex_lazy());
let scoped_parents = ScopedParents {
parents: this,
scope,
};
dag.add_heads(&scoped_parents, &heads.into()).await?;
Ok(dag)
}
| {
to_visit.push(parent);
} | conditional_block |
default_impl.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::HashMap;
use std::collections::HashSet;
use std::future::Future;
use std::sync::Arc;
use futures::future::BoxFuture;
use futures::FutureExt;
use futures::StreamExt;
use futures::TryStreamExt;
use crate::namedag::MemNameDag;
use crate::nameset::hints::Hints;
use crate::ops::DagAddHeads;
use crate::ops::IdConvert;
use crate::ops::IdDagAlgorithm;
use crate::ops::Parents;
use crate::DagAlgorithm;
use crate::Id;
use crate::IdSet;
use crate::NameSet;
use crate::Result;
use crate::VertexName;
/// Re-create the graph so it looks better when rendered.
///
/// For example, the left-side graph will be rewritten to the right-side:
///
/// 1. Linearize.
///
/// ```plain,ignore
/// A A # Linearize is done by IdMap::assign_heads,
/// | | # as long as the heads provided are the heads
/// | C B # of the whole graph ("A", "C", not "B", "D").
/// | | |
/// B | -> | C
/// | | | |
/// | D | D
/// |/ |/
/// E E
/// ```
///
/// 2. Reorder branches (at different branching points) to reduce columns.
///
/// ```plain,ignore
/// D B
/// | | # Assuming the main branch is B-C-E.
/// B | | A # Branching point of the D branch is "C"
/// | | |/ # Branching point of the A branch is "C"
/// | | A -> C # The D branch should be moved to below
/// | |/ | # the A branch.
/// | | | D
/// |/| |/
/// C / E
/// |/
/// E
/// ```
///
/// 3. Reorder branches (at a same branching point) to reduce length of
/// edges.
///
/// ```plain,ignore
/// D A
/// | | # This is done by picking the longest
/// | A B # branch (A-B-C-E) as the "main branch"
/// | | | # and work on the remaining branches
/// | B -> C # recursively.
/// | | |
/// | C | D
/// |/ |/
/// E E
/// ```
///
/// `main_branch` optionally defines how to sort the heads. A head `x` will
/// be emitted first during iteration, if `ancestors(x) & main_branch`
/// contains larger vertexes. For example, if `main_branch` is `[C, D, E]`,
/// then `C` will be emitted first, and the returned DAG will have `all()`
/// output `[C, D, A, B, E]`. Practically, `main_branch` usually contains
/// "public" commits.
///
/// This function is expensive. Only run on small graphs.
///
/// This function is currently more optimized for "forking" cases. It is
/// not yet optimized for graphs with many merges.
pub(crate) async fn beautify(
this: &(impl DagAlgorithm +?Sized),
main_branch: Option<NameSet>,
) -> Result<MemNameDag> {
// Find the "largest" branch.
async fn find_main_branch<F, O>(get_ancestors: &F, heads: &[VertexName]) -> Result<NameSet>
where
F: Fn(&VertexName) -> O,
F: Send,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let mut best_branch = NameSet::empty();
let mut best_count = best_branch.count().await?;
for head in heads {
let branch = get_ancestors(head).await?;
let count = branch.count().await?;
if count > best_count {
best_count = count;
best_branch = branch;
}
}
Ok(best_branch)
}
// Sort heads recursively.
// Cannot use "async fn" due to rustc limitation on async recursion.
fn sort<'a: 't, 'b: 't, 't, F, O>(
get_ancestors: &'a F,
heads: &'b mut [VertexName],
main_branch: NameSet,
) -> BoxFuture<'t, Result<()>>
where
F: Fn(&VertexName) -> O,
F: Send + Sync,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let fut = async move {
if heads.len() <= 1 {
return Ok(());
}
// Sort heads by "branching point" on the main branch.
let mut branching_points: HashMap<VertexName, usize> =
HashMap::with_capacity(heads.len());
for head in heads.iter() {
let count = (get_ancestors(head).await? & main_branch.clone())
.count()
.await?;
branching_points.insert(head.clone(), count);
}
heads.sort_by_key(|v| branching_points.get(v));
// For heads with a same branching point, sort them recursively
// using a different "main branch".
let mut start = 0;
let mut start_branching_point: Option<usize> = None;
for end in 0..=heads.len() {
let branching_point = heads
.get(end)
.and_then(|h| branching_points.get(&h).cloned());
if branching_point!= start_branching_point {
if start + 1 < end {
let heads = &mut heads[start..end];
let main_branch = find_main_branch(get_ancestors, heads).await?;
// "boxed" is used to workaround async recursion.
sort(get_ancestors, heads, main_branch).boxed().await?;
}
start = end;
start_branching_point = branching_point;
}
}
Ok(())
};
Box::pin(fut)
}
let main_branch = main_branch.unwrap_or_else(NameSet::empty);
let heads = this
.heads_ancestors(this.all().await?)
.await?
.iter()
.await?;
let mut heads: Vec<_> = heads.try_collect().await?;
let get_ancestors = |head: &VertexName| this.ancestors(head.into());
// Stabilize output if the sort key conflicts.
heads.sort();
sort(&get_ancestors, &mut heads[..], main_branch).await?;
let mut dag = MemNameDag::new();
dag.add_heads(&this.dag_snapshot()?, &heads.into()).await?;
Ok(dag)
}
/// Convert `Set` to a `Parents` implementation that only returns vertexes in the set.
pub(crate) async fn set_to_parents(set: &NameSet) -> Result<Option<impl Parents>> {
let (id_set, id_map) = match set.to_id_set_and_id_map_in_o1() {
Some(v) => v,
None => return Ok(None),
};
let dag = match set.dag() {
None => return Ok(None),
Some(dag) => dag,
};
let id_dag = dag.id_dag_snapshot()?;
// Pre-resolve ids to vertexes. Reduce remote lookup round-trips.
let ids: Vec<Id> = id_set.iter_desc().collect();
id_map.vertex_name_batch(&ids).await?;
struct IdParents {
id_set: IdSet,
id_dag: Arc<dyn IdDagAlgorithm + Send + Sync>,
id_map: Arc<dyn IdConvert + Send + Sync>,
}
#[async_trait::async_trait]
impl Parents for IdParents {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
tracing::debug!(
target: "dag::idparents",
"resolving parents for {:?}", &name,
);
let id = self.id_map.vertex_id(name).await?;
let direct_parent_ids = self.id_dag.parent_ids(id)?;
let parent_ids = if direct_parent_ids.iter().all(|&id| self.id_set.contains(id)) {
// Fast path. No "leaked" parents.
direct_parent_ids
} else {
// Slower path.
// PERF: There might be room to optimize (ex. dedicated API like
// reachable_roots).
let parent_id_set = IdSet::from_spans(direct_parent_ids);
let ancestors = self.id_dag.ancestors(parent_id_set)?;
let heads = ancestors.intersection(&self.id_set);
let heads = self.id_dag.heads_ancestors(heads)?;
heads.iter_desc().collect()
};
let vertexes = self.id_map.vertex_name_batch(&parent_ids).await?;
let parents = vertexes.into_iter().collect::<Result<Vec<_>>>()?;
Ok(parents)
}
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// The `IdParents` is not intended to be inserted to other graphs.
tracing::warn!(
target: "dag::idparents",
"IdParents does not implement hint_subdag_for_insertion() for efficient insertion"
);
Ok(MemNameDag::new())
}
}
let parents = IdParents {
id_set,
id_dag,
id_map,
};
Ok(Some(parents))
}
pub(crate) async fn parents(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let mut result: Vec<VertexName> = Vec::new();
let mut iter = set.iter().await?;
// PERF: This is not an efficient async implementation.
while let Some(vertex) = iter.next().await {
let parents = this.parent_names(vertex?).await?;
result.extend(parents);
}
Ok(NameSet::from_static_names(result))
}
pub(crate) async fn first_ancestor_nth(
this: &(impl DagAlgorithm +?Sized),
name: VertexName,
n: u64,
) -> Result<Option<VertexName>> {
let mut vertex = name.clone();
for _ in 0..n {
let parents = this.parent_names(vertex).await?;
if parents.is_empty() {
return Ok(None);
}
vertex = parents[0].clone();
}
Ok(Some(vertex))
}
pub(crate) async fn first_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let mut to_visit: Vec<VertexName> = {
let mut list = Vec::with_capacity(set.count().await?);
let mut iter = set.iter().await?;
while let Some(next) = iter.next().await {
let vertex = next?;
list.push(vertex);
}
list
};
let mut visited: HashSet<VertexName> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
#[allow(clippy::never_loop)]
if let Some(parent) = this.parent_names(v).await?.into_iter().next() {
if visited.insert(parent.clone()) {
to_visit.push(parent);
}
}
}
let hints = Hints::new_inherit_idmap_dag(set.hints());
let set = NameSet::from_iter(visited.into_iter().map(Ok), hints);
this.sort(&set).await
}
pub(crate) async fn heads(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.parents(set).await?)
}
pub(crate) async fn roots(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.children(set).await?)
}
pub(crate) async fn merges(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let this = this.dag_snapshot()?;
Ok(set.filter(Box::new(move |v: &VertexName| {
let this = this.clone();
Box::pin(async move {
DagAlgorithm::parent_names(&this, v.clone())
.await
.map(|ps| ps.len() >= 2)
})
})))
}
pub(crate) async fn reachable_roots(
this: &(impl DagAlgorithm +?Sized),
roots: NameSet,
heads: NameSet,
) -> Result<NameSet> {
let heads_ancestors = this.ancestors(heads.clone()).await?;
let roots = roots & heads_ancestors.clone(); // Filter out "bogus" roots.
let only = heads_ancestors - this.ancestors(roots.clone()).await?;
Ok(roots.clone() & (heads.clone() | this.parents(only).await?))
}
pub(crate) async fn heads_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
this.heads(this.ancestors(set).await?).await
}
pub(crate) async fn only(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<NameSet> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok(reachable - unreachable)
}
pub(crate) async fn only_both(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<(NameSet, NameSet)> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok((reachable - unreachable.clone(), unreachable))
}
pub(crate) async fn gca_one(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<Option<VertexName>> |
pub(crate) async fn gca_all(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
this.heads_ancestors(this.common_ancestors(set).await?)
.await
}
pub(crate) async fn common_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let result = match set.count().await? {
0 => set,
1 => this.ancestors(set).await?,
_ => {
// Try to reduce the size of `set`.
// `common_ancestors(X)` = `common_ancestors(roots(X))`.
let set = this.roots(set).await?;
let mut iter = set.iter().await?;
let mut result = this
.ancestors(NameSet::from(iter.next().await.unwrap()?))
.await?;
while let Some(v) = iter.next().await {
result = result.intersection(&this.ancestors(NameSet::from(v?)).await?);
}
result
}
};
Ok(result)
}
pub(crate) async fn is_ancestor(
this: &(impl DagAlgorithm +?Sized),
ancestor: VertexName,
descendant: VertexName,
) -> Result<bool> {
let mut to_visit = vec![descendant];
let mut visited: HashSet<_> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
if v == ancestor {
return Ok(true);
}
for parent in this.parent_names(v).await? {
if visited.insert(parent.clone()) {
to_visit.push(parent);
}
}
}
Ok(false)
}
#[tracing::instrument(skip(this), level=tracing::Level::DEBUG)]
pub(crate) async fn hint_subdag_for_insertion(
this: &(impl Parents +?Sized),
scope: &NameSet,
heads: &[VertexName],
) -> Result<MemNameDag> {
let count = scope.count().await?;
tracing::trace!("hint_subdag_for_insertion: pending vertexes: {}", count);
// ScopedParents only contains parents within "scope".
struct ScopedParents<'a, P: Parents +?Sized> {
parents: &'a P,
scope: &'a NameSet,
}
#[async_trait::async_trait]
impl<'a, P: Parents +?Sized> Parents for ScopedParents<'a, P> {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
let parents: Vec<VertexName> = self.parents.parent_names(name).await?;
// Filter by scope. We don't need to provide a "correct" parents here.
// It is only used to optimize network fetches, not used to actually insert
// to the graph.
let mut filtered_parents = Vec::with_capacity(parents.len());
for v in parents {
if self.scope.contains(&v).await? {
filtered_parents.push(v)
}
}
Ok(filtered_parents)
}
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// No need to use such a hint (to avoid infinite recursion).
// Pending names should exist in the graph without using remote fetching.
Ok(MemNameDag::new())
}
}
// Insert vertexes in `scope` to `dag`.
let mut dag = MemNameDag::new();
// The MemNameDag should not be lazy.
assert!(!dag.is_vertex_lazy());
let scoped_parents = ScopedParents {
parents: this,
scope,
};
dag.add_heads(&scoped_parents, &heads.into()).await?;
Ok(dag)
}
| {
this.gca_all(set)
.await?
.iter()
.await?
.next()
.await
.transpose()
} | identifier_body |
default_impl.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::HashMap;
use std::collections::HashSet;
use std::future::Future;
use std::sync::Arc;
use futures::future::BoxFuture;
use futures::FutureExt;
use futures::StreamExt;
use futures::TryStreamExt;
use crate::namedag::MemNameDag;
use crate::nameset::hints::Hints;
use crate::ops::DagAddHeads;
use crate::ops::IdConvert;
use crate::ops::IdDagAlgorithm;
use crate::ops::Parents;
use crate::DagAlgorithm;
use crate::Id;
use crate::IdSet;
use crate::NameSet;
use crate::Result;
use crate::VertexName;
/// Re-create the graph so it looks better when rendered.
///
/// For example, the left-side graph will be rewritten to the right-side:
///
/// 1. Linearize.
///
/// ```plain,ignore
/// A A # Linearize is done by IdMap::assign_heads,
/// | | # as long as the heads provided are the heads
/// | C B # of the whole graph ("A", "C", not "B", "D").
/// | | |
/// B | -> | C
/// | | | |
/// | D | D
/// |/ |/
/// E E
/// ```
///
/// 2. Reorder branches (at different branching points) to reduce columns.
///
/// ```plain,ignore
/// D B
/// | | # Assuming the main branch is B-C-E.
/// B | | A # Branching point of the D branch is "C"
/// | | |/ # Branching point of the A branch is "C"
/// | | A -> C # The D branch should be moved to below
/// | |/ | # the A branch.
/// | | | D
/// |/| |/
/// C / E
/// |/
/// E
/// ```
///
/// 3. Reorder branches (at a same branching point) to reduce length of
/// edges.
///
/// ```plain,ignore
/// D A
/// | | # This is done by picking the longest
/// | A B # branch (A-B-C-E) as the "main branch"
/// | | | # and work on the remaining branches
/// | B -> C # recursively.
/// | | |
/// | C | D
/// |/ |/
/// E E
/// ```
///
/// `main_branch` optionally defines how to sort the heads. A head `x` will
/// be emitted first during iteration, if `ancestors(x) & main_branch`
/// contains larger vertexes. For example, if `main_branch` is `[C, D, E]`,
/// then `C` will be emitted first, and the returned DAG will have `all()`
/// output `[C, D, A, B, E]`. Practically, `main_branch` usually contains
/// "public" commits.
///
/// This function is expensive. Only run on small graphs.
///
/// This function is currently more optimized for "forking" cases. It is
/// not yet optimized for graphs with many merges.
pub(crate) async fn beautify(
this: &(impl DagAlgorithm +?Sized),
main_branch: Option<NameSet>,
) -> Result<MemNameDag> {
// Find the "largest" branch.
async fn find_main_branch<F, O>(get_ancestors: &F, heads: &[VertexName]) -> Result<NameSet>
where
F: Fn(&VertexName) -> O,
F: Send,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let mut best_branch = NameSet::empty();
let mut best_count = best_branch.count().await?;
for head in heads {
let branch = get_ancestors(head).await?;
let count = branch.count().await?;
if count > best_count {
best_count = count;
best_branch = branch;
}
}
Ok(best_branch)
}
// Sort heads recursively.
// Cannot use "async fn" due to rustc limitation on async recursion.
fn sort<'a: 't, 'b: 't, 't, F, O>(
get_ancestors: &'a F,
heads: &'b mut [VertexName],
main_branch: NameSet,
) -> BoxFuture<'t, Result<()>>
where
F: Fn(&VertexName) -> O,
F: Send + Sync,
O: Future<Output = Result<NameSet>>,
O: Send,
{
let fut = async move {
if heads.len() <= 1 {
return Ok(());
}
// Sort heads by "branching point" on the main branch.
let mut branching_points: HashMap<VertexName, usize> =
HashMap::with_capacity(heads.len());
for head in heads.iter() {
let count = (get_ancestors(head).await? & main_branch.clone())
.count()
.await?;
branching_points.insert(head.clone(), count);
}
heads.sort_by_key(|v| branching_points.get(v));
// For heads with a same branching point, sort them recursively
// using a different "main branch".
let mut start = 0;
let mut start_branching_point: Option<usize> = None;
for end in 0..=heads.len() {
let branching_point = heads
.get(end)
.and_then(|h| branching_points.get(&h).cloned());
if branching_point!= start_branching_point {
if start + 1 < end {
let heads = &mut heads[start..end];
let main_branch = find_main_branch(get_ancestors, heads).await?;
// "boxed" is used to workaround async recursion.
sort(get_ancestors, heads, main_branch).boxed().await?;
}
start = end;
start_branching_point = branching_point;
}
}
Ok(())
};
Box::pin(fut)
}
let main_branch = main_branch.unwrap_or_else(NameSet::empty);
let heads = this
.heads_ancestors(this.all().await?)
.await?
.iter()
.await?;
let mut heads: Vec<_> = heads.try_collect().await?;
let get_ancestors = |head: &VertexName| this.ancestors(head.into());
// Stabilize output if the sort key conflicts.
heads.sort();
sort(&get_ancestors, &mut heads[..], main_branch).await?;
let mut dag = MemNameDag::new();
dag.add_heads(&this.dag_snapshot()?, &heads.into()).await?;
Ok(dag)
}
/// Convert `Set` to a `Parents` implementation that only returns vertexes in the set.
pub(crate) async fn set_to_parents(set: &NameSet) -> Result<Option<impl Parents>> {
let (id_set, id_map) = match set.to_id_set_and_id_map_in_o1() {
Some(v) => v,
None => return Ok(None),
};
let dag = match set.dag() {
None => return Ok(None),
Some(dag) => dag,
};
let id_dag = dag.id_dag_snapshot()?;
// Pre-resolve ids to vertexes. Reduce remote lookup round-trips.
let ids: Vec<Id> = id_set.iter_desc().collect();
id_map.vertex_name_batch(&ids).await?;
struct IdParents {
id_set: IdSet,
id_dag: Arc<dyn IdDagAlgorithm + Send + Sync>,
id_map: Arc<dyn IdConvert + Send + Sync>,
}
#[async_trait::async_trait]
impl Parents for IdParents {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
tracing::debug!(
target: "dag::idparents",
"resolving parents for {:?}", &name,
);
let id = self.id_map.vertex_id(name).await?;
let direct_parent_ids = self.id_dag.parent_ids(id)?;
let parent_ids = if direct_parent_ids.iter().all(|&id| self.id_set.contains(id)) {
// Fast path. No "leaked" parents.
direct_parent_ids
} else {
// Slower path.
// PERF: There might be room to optimize (ex. dedicated API like
// reachable_roots).
let parent_id_set = IdSet::from_spans(direct_parent_ids);
let ancestors = self.id_dag.ancestors(parent_id_set)?;
let heads = ancestors.intersection(&self.id_set);
let heads = self.id_dag.heads_ancestors(heads)?;
heads.iter_desc().collect()
};
let vertexes = self.id_map.vertex_name_batch(&parent_ids).await?;
let parents = vertexes.into_iter().collect::<Result<Vec<_>>>()?;
Ok(parents)
}
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// The `IdParents` is not intended to be inserted to other graphs.
tracing::warn!(
target: "dag::idparents",
"IdParents does not implement hint_subdag_for_insertion() for efficient insertion"
);
Ok(MemNameDag::new())
}
}
let parents = IdParents {
id_set,
id_dag,
id_map,
};
Ok(Some(parents))
}
pub(crate) async fn parents(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let mut result: Vec<VertexName> = Vec::new();
let mut iter = set.iter().await?;
// PERF: This is not an efficient async implementation.
while let Some(vertex) = iter.next().await {
let parents = this.parent_names(vertex?).await?;
result.extend(parents);
}
Ok(NameSet::from_static_names(result))
}
pub(crate) async fn first_ancestor_nth(
this: &(impl DagAlgorithm +?Sized),
name: VertexName,
n: u64,
) -> Result<Option<VertexName>> {
let mut vertex = name.clone();
for _ in 0..n {
let parents = this.parent_names(vertex).await?;
if parents.is_empty() {
return Ok(None);
}
vertex = parents[0].clone();
}
Ok(Some(vertex))
}
pub(crate) async fn first_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let mut to_visit: Vec<VertexName> = {
let mut list = Vec::with_capacity(set.count().await?);
let mut iter = set.iter().await?;
while let Some(next) = iter.next().await {
let vertex = next?;
list.push(vertex);
}
list
};
let mut visited: HashSet<VertexName> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
#[allow(clippy::never_loop)]
if let Some(parent) = this.parent_names(v).await?.into_iter().next() {
if visited.insert(parent.clone()) {
to_visit.push(parent);
}
}
}
let hints = Hints::new_inherit_idmap_dag(set.hints());
let set = NameSet::from_iter(visited.into_iter().map(Ok), hints);
this.sort(&set).await
}
pub(crate) async fn heads(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.parents(set).await?)
}
pub(crate) async fn roots(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
Ok(set.clone() - this.children(set).await?)
}
pub(crate) async fn merges(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
let this = this.dag_snapshot()?;
Ok(set.filter(Box::new(move |v: &VertexName| {
let this = this.clone();
Box::pin(async move {
DagAlgorithm::parent_names(&this, v.clone())
.await
.map(|ps| ps.len() >= 2)
})
})))
}
pub(crate) async fn reachable_roots(
this: &(impl DagAlgorithm +?Sized),
roots: NameSet,
heads: NameSet,
) -> Result<NameSet> {
let heads_ancestors = this.ancestors(heads.clone()).await?;
let roots = roots & heads_ancestors.clone(); // Filter out "bogus" roots.
let only = heads_ancestors - this.ancestors(roots.clone()).await?;
Ok(roots.clone() & (heads.clone() | this.parents(only).await?))
}
pub(crate) async fn heads_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
this.heads(this.ancestors(set).await?).await
}
pub(crate) async fn only(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<NameSet> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok(reachable - unreachable)
}
pub(crate) async fn only_both(
this: &(impl DagAlgorithm +?Sized),
reachable: NameSet,
unreachable: NameSet,
) -> Result<(NameSet, NameSet)> {
let reachable = this.ancestors(reachable).await?;
let unreachable = this.ancestors(unreachable).await?;
Ok((reachable - unreachable.clone(), unreachable))
}
pub(crate) async fn gca_one(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<Option<VertexName>> {
this.gca_all(set)
.await?
.iter()
.await?
.next()
.await
.transpose()
}
pub(crate) async fn gca_all(this: &(impl DagAlgorithm +?Sized), set: NameSet) -> Result<NameSet> {
this.heads_ancestors(this.common_ancestors(set).await?)
.await
}
pub(crate) async fn common_ancestors(
this: &(impl DagAlgorithm +?Sized),
set: NameSet,
) -> Result<NameSet> {
let result = match set.count().await? {
0 => set,
1 => this.ancestors(set).await?,
_ => {
// Try to reduce the size of `set`.
// `common_ancestors(X)` = `common_ancestors(roots(X))`.
let set = this.roots(set).await?;
let mut iter = set.iter().await?;
let mut result = this
.ancestors(NameSet::from(iter.next().await.unwrap()?))
.await?;
while let Some(v) = iter.next().await {
result = result.intersection(&this.ancestors(NameSet::from(v?)).await?);
}
result
}
};
Ok(result)
}
pub(crate) async fn is_ancestor(
this: &(impl DagAlgorithm +?Sized),
ancestor: VertexName,
descendant: VertexName,
) -> Result<bool> {
let mut to_visit = vec![descendant];
let mut visited: HashSet<_> = to_visit.clone().into_iter().collect();
while let Some(v) = to_visit.pop() {
if v == ancestor {
return Ok(true);
}
for parent in this.parent_names(v).await? {
if visited.insert(parent.clone()) {
to_visit.push(parent);
}
}
}
Ok(false)
}
#[tracing::instrument(skip(this), level=tracing::Level::DEBUG)]
pub(crate) async fn hint_subdag_for_insertion(
this: &(impl Parents +?Sized),
scope: &NameSet,
heads: &[VertexName],
) -> Result<MemNameDag> {
let count = scope.count().await?;
tracing::trace!("hint_subdag_for_insertion: pending vertexes: {}", count);
// ScopedParents only contains parents within "scope".
struct ScopedParents<'a, P: Parents +?Sized> {
parents: &'a P,
scope: &'a NameSet,
}
#[async_trait::async_trait]
impl<'a, P: Parents +?Sized> Parents for ScopedParents<'a, P> {
async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> {
let parents: Vec<VertexName> = self.parents.parent_names(name).await?;
// Filter by scope. We don't need to provide a "correct" parents here.
// It is only used to optimize network fetches, not used to actually insert
// to the graph.
let mut filtered_parents = Vec::with_capacity(parents.len());
for v in parents {
if self.scope.contains(&v).await? {
filtered_parents.push(v) | }
async fn hint_subdag_for_insertion(&self, _heads: &[VertexName]) -> Result<MemNameDag> {
// No need to use such a hint (to avoid infinite recursion).
// Pending names should exist in the graph without using remote fetching.
Ok(MemNameDag::new())
}
}
// Insert vertexes in `scope` to `dag`.
let mut dag = MemNameDag::new();
// The MemNameDag should not be lazy.
assert!(!dag.is_vertex_lazy());
let scoped_parents = ScopedParents {
parents: this,
scope,
};
dag.add_heads(&scoped_parents, &heads.into()).await?;
Ok(dag)
} | }
}
Ok(filtered_parents) | random_line_split |
mouse.rs |
//! Back-end agnostic mouse buttons.
use num::{ FromPrimitive, ToPrimitive };
/// Represent a mouse button.
#[derive(Copy, Clone, RustcDecodable, RustcEncodable, PartialEq,
Eq, Ord, PartialOrd, Hash, Debug)]
pub enum MouseButton {
/// Unknown mouse button.
Unknown,
/// Left mouse button.
Left,
/// Right mouse button.
Right,
/// Middle mouse button.
Middle,
/// Extra mouse button number 1.
X1,
/// Extra mouse button number 2.
X2,
/// Mouse button number 6.
Button6,
/// Mouse button number 7.
Button7,
/// Mouse button number 8.
Button8,
}
impl FromPrimitive for MouseButton {
fn from_u64(n: u64) -> Option<MouseButton> {
match n {
0 => Some(MouseButton::Unknown),
1 => Some(MouseButton::Left),
2 => Some(MouseButton::Right),
3 => Some(MouseButton::Middle),
4 => Some(MouseButton::X1),
5 => Some(MouseButton::X2),
6 => Some(MouseButton::Button6),
7 => Some(MouseButton::Button7),
8 => Some(MouseButton::Button8),
_ => Some(MouseButton::Unknown),
}
}
#[inline(always)]
fn from_i64(n: i64) -> Option<MouseButton> {
FromPrimitive::from_u64(n as u64)
}
#[inline(always)]
fn from_isize(n: isize) -> Option<MouseButton> |
}
impl ToPrimitive for MouseButton {
fn to_u64(&self) -> Option<u64> {
match self {
&MouseButton::Unknown => Some(0),
&MouseButton::Left => Some(1),
&MouseButton::Right => Some(2),
&MouseButton::Middle => Some(3),
&MouseButton::X1 => Some(4),
&MouseButton::X2 => Some(5),
&MouseButton::Button6 => Some(6),
&MouseButton::Button7 => Some(7),
&MouseButton::Button8 => Some(8),
}
}
#[inline(always)]
fn to_i64(&self) -> Option<i64> {
self.to_u64().map(|x| x as i64)
}
#[inline(always)]
fn to_isize(&self) -> Option<isize> {
self.to_u64().map(|x| x as isize)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mouse_button_primitives() {
use num::{ FromPrimitive, ToPrimitive };
for i in 0u64..9 {
let button: MouseButton = FromPrimitive::from_u64(i).unwrap();
let j = ToPrimitive::to_u64(&button).unwrap();
assert_eq!(i, j);
}
}
}
| {
FromPrimitive::from_u64(n as u64)
} | identifier_body |
mouse.rs | //! Back-end agnostic mouse buttons.
use num::{ FromPrimitive, ToPrimitive }; | #[derive(Copy, Clone, RustcDecodable, RustcEncodable, PartialEq,
Eq, Ord, PartialOrd, Hash, Debug)]
pub enum MouseButton {
/// Unknown mouse button.
Unknown,
/// Left mouse button.
Left,
/// Right mouse button.
Right,
/// Middle mouse button.
Middle,
/// Extra mouse button number 1.
X1,
/// Extra mouse button number 2.
X2,
/// Mouse button number 6.
Button6,
/// Mouse button number 7.
Button7,
/// Mouse button number 8.
Button8,
}
impl FromPrimitive for MouseButton {
fn from_u64(n: u64) -> Option<MouseButton> {
match n {
0 => Some(MouseButton::Unknown),
1 => Some(MouseButton::Left),
2 => Some(MouseButton::Right),
3 => Some(MouseButton::Middle),
4 => Some(MouseButton::X1),
5 => Some(MouseButton::X2),
6 => Some(MouseButton::Button6),
7 => Some(MouseButton::Button7),
8 => Some(MouseButton::Button8),
_ => Some(MouseButton::Unknown),
}
}
#[inline(always)]
fn from_i64(n: i64) -> Option<MouseButton> {
FromPrimitive::from_u64(n as u64)
}
#[inline(always)]
fn from_isize(n: isize) -> Option<MouseButton> {
FromPrimitive::from_u64(n as u64)
}
}
impl ToPrimitive for MouseButton {
fn to_u64(&self) -> Option<u64> {
match self {
&MouseButton::Unknown => Some(0),
&MouseButton::Left => Some(1),
&MouseButton::Right => Some(2),
&MouseButton::Middle => Some(3),
&MouseButton::X1 => Some(4),
&MouseButton::X2 => Some(5),
&MouseButton::Button6 => Some(6),
&MouseButton::Button7 => Some(7),
&MouseButton::Button8 => Some(8),
}
}
#[inline(always)]
fn to_i64(&self) -> Option<i64> {
self.to_u64().map(|x| x as i64)
}
#[inline(always)]
fn to_isize(&self) -> Option<isize> {
self.to_u64().map(|x| x as isize)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mouse_button_primitives() {
use num::{ FromPrimitive, ToPrimitive };
for i in 0u64..9 {
let button: MouseButton = FromPrimitive::from_u64(i).unwrap();
let j = ToPrimitive::to_u64(&button).unwrap();
assert_eq!(i, j);
}
}
} |
/// Represent a mouse button. | random_line_split |
mouse.rs |
//! Back-end agnostic mouse buttons.
use num::{ FromPrimitive, ToPrimitive };
/// Represent a mouse button.
#[derive(Copy, Clone, RustcDecodable, RustcEncodable, PartialEq,
Eq, Ord, PartialOrd, Hash, Debug)]
pub enum MouseButton {
/// Unknown mouse button.
Unknown,
/// Left mouse button.
Left,
/// Right mouse button.
Right,
/// Middle mouse button.
Middle,
/// Extra mouse button number 1.
X1,
/// Extra mouse button number 2.
X2,
/// Mouse button number 6.
Button6,
/// Mouse button number 7.
Button7,
/// Mouse button number 8.
Button8,
}
impl FromPrimitive for MouseButton {
fn from_u64(n: u64) -> Option<MouseButton> {
match n {
0 => Some(MouseButton::Unknown),
1 => Some(MouseButton::Left),
2 => Some(MouseButton::Right),
3 => Some(MouseButton::Middle),
4 => Some(MouseButton::X1),
5 => Some(MouseButton::X2),
6 => Some(MouseButton::Button6),
7 => Some(MouseButton::Button7),
8 => Some(MouseButton::Button8),
_ => Some(MouseButton::Unknown),
}
}
#[inline(always)]
fn from_i64(n: i64) -> Option<MouseButton> {
FromPrimitive::from_u64(n as u64)
}
#[inline(always)]
fn from_isize(n: isize) -> Option<MouseButton> {
FromPrimitive::from_u64(n as u64)
}
}
impl ToPrimitive for MouseButton {
fn | (&self) -> Option<u64> {
match self {
&MouseButton::Unknown => Some(0),
&MouseButton::Left => Some(1),
&MouseButton::Right => Some(2),
&MouseButton::Middle => Some(3),
&MouseButton::X1 => Some(4),
&MouseButton::X2 => Some(5),
&MouseButton::Button6 => Some(6),
&MouseButton::Button7 => Some(7),
&MouseButton::Button8 => Some(8),
}
}
#[inline(always)]
fn to_i64(&self) -> Option<i64> {
self.to_u64().map(|x| x as i64)
}
#[inline(always)]
fn to_isize(&self) -> Option<isize> {
self.to_u64().map(|x| x as isize)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mouse_button_primitives() {
use num::{ FromPrimitive, ToPrimitive };
for i in 0u64..9 {
let button: MouseButton = FromPrimitive::from_u64(i).unwrap();
let j = ToPrimitive::to_u64(&button).unwrap();
assert_eq!(i, j);
}
}
}
| to_u64 | identifier_name |
log.rs | /* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use super::modbus::ModbusTransaction;
use crate::jsonbuilder::{JsonBuilder, JsonError};
use sawp_modbus::{Data, Message, Read, Write};
#[no_mangle]
pub extern "C" fn rs_modbus_to_json(tx: &mut ModbusTransaction, js: &mut JsonBuilder) -> bool {
log(tx, js).is_ok()
}
/// populate a json object with transactional information, for logging
fn | (tx: &ModbusTransaction, js: &mut JsonBuilder) -> Result<(), JsonError> {
js.open_object("modbus")?;
js.set_uint("id", tx.id)?;
if let Some(req) = &tx.request {
js.open_object("request")?;
log_message(&req, js)?;
js.close()?;
}
if let Some(resp) = &tx.response {
js.open_object("response")?;
log_message(&resp, js)?;
js.close()?;
}
js.close()?;
Ok(())
}
fn log_message(msg: &Message, js: &mut JsonBuilder) -> Result<(), JsonError> {
js.set_uint("transaction_id", msg.transaction_id.into())?;
js.set_uint("protocol_id", msg.protocol_id.into())?;
js.set_uint("unit_id", msg.unit_id.into())?;
js.set_uint("function_raw", msg.function.raw.into())?;
js.set_string("function_code", &msg.function.code.to_string())?;
js.set_string("access_type", &msg.access_type.to_string())?;
js.set_string("category", &msg.category.to_string())?;
js.set_string("error_flags", &msg.error_flags.to_string())?;
match &msg.data {
Data::Exception(exc) => {
js.open_object("exception")?;
js.set_uint("raw", exc.raw.into())?;
js.set_string("code", &exc.code.to_string())?;
js.close()?;
}
Data::Diagnostic { func, data } => {
js.open_object("diagnostic")?;
js.set_uint("raw", func.raw.into())?;
js.set_string("code", &func.code.to_string())?;
js.set_string_from_bytes("data", &data)?;
js.close()?;
}
Data::MEI { mei_type, data } => {
js.open_object("mei")?;
js.set_uint("raw", mei_type.raw.into())?;
js.set_string("code", &mei_type.code.to_string())?;
js.set_string_from_bytes("data", &data)?;
js.close()?;
}
Data::Read(read) => {
js.open_object("read")?;
log_read(read, js)?;
js.close()?;
}
Data::Write(write) => {
js.open_object("write")?;
log_write(write, js)?;
js.close()?;
}
Data::ReadWrite { read, write } => {
js.open_object("read")?;
log_read(read, js)?;
js.close()?;
js.open_object("write")?;
log_write(write, js)?;
js.close()?;
}
Data::ByteVec(data) => {
js.set_string_from_bytes("data", &data)?;
}
Data::Empty => {}
}
Ok(())
}
fn log_read(read: &Read, js: &mut JsonBuilder) -> Result<(), JsonError> {
match read {
Read::Request { address, quantity } => {
js.set_uint("address", (*address).into())?;
js.set_uint("quantity", (*quantity).into())?;
}
Read::Response(data) => {
js.set_string_from_bytes("data", &data)?;
}
}
Ok(())
}
fn log_write(write: &Write, js: &mut JsonBuilder) -> Result<(), JsonError> {
match write {
Write::MultReq {
address,
quantity,
data,
} => {
js.set_uint("address", (*address).into())?;
js.set_uint("quantity", (*quantity).into())?;
js.set_string_from_bytes("data", &data)?;
}
Write::Mask {
address,
and_mask,
or_mask,
} => {
js.set_uint("address", (*address).into())?;
js.set_uint("and_mask", (*and_mask).into())?;
js.set_uint("or_mask", (*or_mask).into())?;
}
Write::Other { address, data } => {
js.set_uint("address", (*address).into())?;
js.set_uint("data", (*data).into())?;
}
}
Ok(())
}
| log | identifier_name |
log.rs | /* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use super::modbus::ModbusTransaction;
use crate::jsonbuilder::{JsonBuilder, JsonError};
use sawp_modbus::{Data, Message, Read, Write};
#[no_mangle]
pub extern "C" fn rs_modbus_to_json(tx: &mut ModbusTransaction, js: &mut JsonBuilder) -> bool {
log(tx, js).is_ok()
}
/// populate a json object with transactional information, for logging
fn log(tx: &ModbusTransaction, js: &mut JsonBuilder) -> Result<(), JsonError> {
js.open_object("modbus")?;
js.set_uint("id", tx.id)?;
if let Some(req) = &tx.request {
js.open_object("request")?;
log_message(&req, js)?;
js.close()?;
}
| }
js.close()?;
Ok(())
}
fn log_message(msg: &Message, js: &mut JsonBuilder) -> Result<(), JsonError> {
js.set_uint("transaction_id", msg.transaction_id.into())?;
js.set_uint("protocol_id", msg.protocol_id.into())?;
js.set_uint("unit_id", msg.unit_id.into())?;
js.set_uint("function_raw", msg.function.raw.into())?;
js.set_string("function_code", &msg.function.code.to_string())?;
js.set_string("access_type", &msg.access_type.to_string())?;
js.set_string("category", &msg.category.to_string())?;
js.set_string("error_flags", &msg.error_flags.to_string())?;
match &msg.data {
Data::Exception(exc) => {
js.open_object("exception")?;
js.set_uint("raw", exc.raw.into())?;
js.set_string("code", &exc.code.to_string())?;
js.close()?;
}
Data::Diagnostic { func, data } => {
js.open_object("diagnostic")?;
js.set_uint("raw", func.raw.into())?;
js.set_string("code", &func.code.to_string())?;
js.set_string_from_bytes("data", &data)?;
js.close()?;
}
Data::MEI { mei_type, data } => {
js.open_object("mei")?;
js.set_uint("raw", mei_type.raw.into())?;
js.set_string("code", &mei_type.code.to_string())?;
js.set_string_from_bytes("data", &data)?;
js.close()?;
}
Data::Read(read) => {
js.open_object("read")?;
log_read(read, js)?;
js.close()?;
}
Data::Write(write) => {
js.open_object("write")?;
log_write(write, js)?;
js.close()?;
}
Data::ReadWrite { read, write } => {
js.open_object("read")?;
log_read(read, js)?;
js.close()?;
js.open_object("write")?;
log_write(write, js)?;
js.close()?;
}
Data::ByteVec(data) => {
js.set_string_from_bytes("data", &data)?;
}
Data::Empty => {}
}
Ok(())
}
fn log_read(read: &Read, js: &mut JsonBuilder) -> Result<(), JsonError> {
match read {
Read::Request { address, quantity } => {
js.set_uint("address", (*address).into())?;
js.set_uint("quantity", (*quantity).into())?;
}
Read::Response(data) => {
js.set_string_from_bytes("data", &data)?;
}
}
Ok(())
}
fn log_write(write: &Write, js: &mut JsonBuilder) -> Result<(), JsonError> {
match write {
Write::MultReq {
address,
quantity,
data,
} => {
js.set_uint("address", (*address).into())?;
js.set_uint("quantity", (*quantity).into())?;
js.set_string_from_bytes("data", &data)?;
}
Write::Mask {
address,
and_mask,
or_mask,
} => {
js.set_uint("address", (*address).into())?;
js.set_uint("and_mask", (*and_mask).into())?;
js.set_uint("or_mask", (*or_mask).into())?;
}
Write::Other { address, data } => {
js.set_uint("address", (*address).into())?;
js.set_uint("data", (*data).into())?;
}
}
Ok(())
} | if let Some(resp) = &tx.response {
js.open_object("response")?;
log_message(&resp, js)?;
js.close()?; | random_line_split |
log.rs | /* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use super::modbus::ModbusTransaction;
use crate::jsonbuilder::{JsonBuilder, JsonError};
use sawp_modbus::{Data, Message, Read, Write};
#[no_mangle]
pub extern "C" fn rs_modbus_to_json(tx: &mut ModbusTransaction, js: &mut JsonBuilder) -> bool |
/// populate a json object with transactional information, for logging
fn log(tx: &ModbusTransaction, js: &mut JsonBuilder) -> Result<(), JsonError> {
js.open_object("modbus")?;
js.set_uint("id", tx.id)?;
if let Some(req) = &tx.request {
js.open_object("request")?;
log_message(&req, js)?;
js.close()?;
}
if let Some(resp) = &tx.response {
js.open_object("response")?;
log_message(&resp, js)?;
js.close()?;
}
js.close()?;
Ok(())
}
fn log_message(msg: &Message, js: &mut JsonBuilder) -> Result<(), JsonError> {
js.set_uint("transaction_id", msg.transaction_id.into())?;
js.set_uint("protocol_id", msg.protocol_id.into())?;
js.set_uint("unit_id", msg.unit_id.into())?;
js.set_uint("function_raw", msg.function.raw.into())?;
js.set_string("function_code", &msg.function.code.to_string())?;
js.set_string("access_type", &msg.access_type.to_string())?;
js.set_string("category", &msg.category.to_string())?;
js.set_string("error_flags", &msg.error_flags.to_string())?;
match &msg.data {
Data::Exception(exc) => {
js.open_object("exception")?;
js.set_uint("raw", exc.raw.into())?;
js.set_string("code", &exc.code.to_string())?;
js.close()?;
}
Data::Diagnostic { func, data } => {
js.open_object("diagnostic")?;
js.set_uint("raw", func.raw.into())?;
js.set_string("code", &func.code.to_string())?;
js.set_string_from_bytes("data", &data)?;
js.close()?;
}
Data::MEI { mei_type, data } => {
js.open_object("mei")?;
js.set_uint("raw", mei_type.raw.into())?;
js.set_string("code", &mei_type.code.to_string())?;
js.set_string_from_bytes("data", &data)?;
js.close()?;
}
Data::Read(read) => {
js.open_object("read")?;
log_read(read, js)?;
js.close()?;
}
Data::Write(write) => {
js.open_object("write")?;
log_write(write, js)?;
js.close()?;
}
Data::ReadWrite { read, write } => {
js.open_object("read")?;
log_read(read, js)?;
js.close()?;
js.open_object("write")?;
log_write(write, js)?;
js.close()?;
}
Data::ByteVec(data) => {
js.set_string_from_bytes("data", &data)?;
}
Data::Empty => {}
}
Ok(())
}
fn log_read(read: &Read, js: &mut JsonBuilder) -> Result<(), JsonError> {
match read {
Read::Request { address, quantity } => {
js.set_uint("address", (*address).into())?;
js.set_uint("quantity", (*quantity).into())?;
}
Read::Response(data) => {
js.set_string_from_bytes("data", &data)?;
}
}
Ok(())
}
fn log_write(write: &Write, js: &mut JsonBuilder) -> Result<(), JsonError> {
match write {
Write::MultReq {
address,
quantity,
data,
} => {
js.set_uint("address", (*address).into())?;
js.set_uint("quantity", (*quantity).into())?;
js.set_string_from_bytes("data", &data)?;
}
Write::Mask {
address,
and_mask,
or_mask,
} => {
js.set_uint("address", (*address).into())?;
js.set_uint("and_mask", (*and_mask).into())?;
js.set_uint("or_mask", (*or_mask).into())?;
}
Write::Other { address, data } => {
js.set_uint("address", (*address).into())?;
js.set_uint("data", (*data).into())?;
}
}
Ok(())
}
| {
log(tx, js).is_ok()
} | identifier_body |
quiz_error.rs | extern crate diesel;
use self::diesel::result::Error as DatabaseError;
use std::error;
use std::fmt;
use std::convert::From;
#[derive(Debug)]
pub enum QuizError {
DatabaseError(DatabaseError),
JokerUnavailable,
GameAlreadyFinished,
NoGameInProgress,
GameStillInProgress,
StateError,
OutOfResources,
}
impl fmt::Display for QuizError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
QuizError::DatabaseError(ref err) => write!(f, "Database error: {}", err),
QuizError::JokerUnavailable => write!(f, "Joker error: Tried to use unavailable Joker"),
QuizError::GameAlreadyFinished => {
write!(f,
"Game already finished error: Tried to interact with a game that has already been finished")
}
QuizError::NoGameInProgress => {
write!(f,
"No game in progress error: Tried to play without starting a game first")
}
QuizError::GameStillInProgress => {
write!(f,
"Game still in progress error: Tried to start game while old one was not finished yet")
}
QuizError::StateError => {
write!(f,
"State error: Found game in a corrupt state, e.g. no available categories")
}
QuizError::OutOfResources => {
write!(f, "Out of resources error: Answered all possible questions")
}
}
}
}
impl error::Error for QuizError {
fn description(&self) -> &str {
match *self {
QuizError::DatabaseError(ref err) => err.description(),
QuizError::JokerUnavailable => "Joker unavailable error",
QuizError::GameAlreadyFinished => "Game already finished error",
QuizError::GameStillInProgress => "Game still in progress error",
QuizError::NoGameInProgress => "No game in progress error",
QuizError::StateError => "State error",
QuizError::OutOfResources => "Out of resources error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
QuizError::DatabaseError(ref err) => Some(err),
_ => None,
}
}
}
impl From<DatabaseError> for QuizError {
fn | (err: DatabaseError) -> Self {
QuizError::DatabaseError(err)
}
}
| from | identifier_name |
quiz_error.rs | extern crate diesel;
use self::diesel::result::Error as DatabaseError;
use std::error;
use std::fmt;
use std::convert::From;
#[derive(Debug)]
pub enum QuizError {
DatabaseError(DatabaseError),
JokerUnavailable,
GameAlreadyFinished,
NoGameInProgress,
GameStillInProgress,
StateError,
OutOfResources,
}
impl fmt::Display for QuizError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
QuizError::DatabaseError(ref err) => write!(f, "Database error: {}", err),
QuizError::JokerUnavailable => write!(f, "Joker error: Tried to use unavailable Joker"),
QuizError::GameAlreadyFinished => { | write!(f,
"Game already finished error: Tried to interact with a game that has already been finished")
}
QuizError::NoGameInProgress => {
write!(f,
"No game in progress error: Tried to play without starting a game first")
}
QuizError::GameStillInProgress => {
write!(f,
"Game still in progress error: Tried to start game while old one was not finished yet")
}
QuizError::StateError => {
write!(f,
"State error: Found game in a corrupt state, e.g. no available categories")
}
QuizError::OutOfResources => {
write!(f, "Out of resources error: Answered all possible questions")
}
}
}
}
impl error::Error for QuizError {
fn description(&self) -> &str {
match *self {
QuizError::DatabaseError(ref err) => err.description(),
QuizError::JokerUnavailable => "Joker unavailable error",
QuizError::GameAlreadyFinished => "Game already finished error",
QuizError::GameStillInProgress => "Game still in progress error",
QuizError::NoGameInProgress => "No game in progress error",
QuizError::StateError => "State error",
QuizError::OutOfResources => "Out of resources error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
QuizError::DatabaseError(ref err) => Some(err),
_ => None,
}
}
}
impl From<DatabaseError> for QuizError {
fn from(err: DatabaseError) -> Self {
QuizError::DatabaseError(err)
}
} | random_line_split |
|
quiz_error.rs | extern crate diesel;
use self::diesel::result::Error as DatabaseError;
use std::error;
use std::fmt;
use std::convert::From;
#[derive(Debug)]
pub enum QuizError {
DatabaseError(DatabaseError),
JokerUnavailable,
GameAlreadyFinished,
NoGameInProgress,
GameStillInProgress,
StateError,
OutOfResources,
}
impl fmt::Display for QuizError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result | QuizError::OutOfResources => {
write!(f, "Out of resources error: Answered all possible questions")
}
}
}
}
impl error::Error for QuizError {
fn description(&self) -> &str {
match *self {
QuizError::DatabaseError(ref err) => err.description(),
QuizError::JokerUnavailable => "Joker unavailable error",
QuizError::GameAlreadyFinished => "Game already finished error",
QuizError::GameStillInProgress => "Game still in progress error",
QuizError::NoGameInProgress => "No game in progress error",
QuizError::StateError => "State error",
QuizError::OutOfResources => "Out of resources error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
QuizError::DatabaseError(ref err) => Some(err),
_ => None,
}
}
}
impl From<DatabaseError> for QuizError {
fn from(err: DatabaseError) -> Self {
QuizError::DatabaseError(err)
}
}
| {
match *self {
QuizError::DatabaseError(ref err) => write!(f, "Database error: {}", err),
QuizError::JokerUnavailable => write!(f, "Joker error: Tried to use unavailable Joker"),
QuizError::GameAlreadyFinished => {
write!(f,
"Game already finished error: Tried to interact with a game that has already been finished")
}
QuizError::NoGameInProgress => {
write!(f,
"No game in progress error: Tried to play without starting a game first")
}
QuizError::GameStillInProgress => {
write!(f,
"Game still in progress error: Tried to start game while old one was not finished yet")
}
QuizError::StateError => {
write!(f,
"State error: Found game in a corrupt state, e.g. no available categories")
} | identifier_body |
lib.rs | //! # `peeking_take_while`
//!
//! Provides the `peeking_take_while` iterator adaptor method.
//!
//! The `peeking_take_while` method is very similar to `take_while`, but behaves
//! differently when used with a borrowed iterator (perhaps returned by
//! `Iterator::by_ref`).
//!
//! `peeking_take_while` peeks at the next item in the iterator and runs the
//! predicate on that peeked item. This avoids consuming the first item yielded
//! by the underlying iterator for which the predicate returns `false`. On the
//! other hand, `take_while` will consume that first item for which the
//! predicate returns `false`, and it will be lost.
//!
//! ```
//! extern crate peeking_take_while;
//!
//! // Bring the `peeking_take_while` method for peekable iterators into
//! // scope.
//! use peeking_take_while::PeekableExt;
//!
//! # fn main() {
//! // Let's say we have two collections we want to iterate through: `xs` and
//! // `ys`. We want to perform one operation on all the leading contiguous
//! // elements that match some predicate, and a different thing with the rest of
//! // the elements. With the `xs`, we will use the normal `take_while`. With the
//! // `ys`, we will use `peeking_take_while`.
//!
//! let xs: Vec<u8> = (0..100).collect();
//! let ys = xs.clone();
//!
//! let mut iter_xs = xs.into_iter();
//! let mut iter_ys = ys.into_iter().peekable();
//!
//! {
//! // Let's do one thing with all the items that are less than 10.
//! # fn do_things_with<T>(_: T) {}
//!
//! let xs_less_than_ten = iter_xs.by_ref().take_while(|x| *x < 10);
//! for x in xs_less_than_ten {
//! do_things_with(x);
//! }
//!
//! let ys_less_than_ten = iter_ys.by_ref().peeking_take_while(|y| *y < 10);
//! for y in ys_less_than_ten {
//! do_things_with(y);
//! }
//! }
//! | //! // or equal to 10.
//!
//! //...except, when using plain old `take_while` we lost 10!
//! assert_eq!(iter_xs.next(), Some(11));
//!
//! // However, when using `peeking_take_while` we did not! Great!
//! assert_eq!(iter_ys.next(), Some(10));
//! # }
//! ```
use std::iter::Peekable;
/// The iterator returned by `peeking_take_while`.
///
/// See the [module documentation](./index.html) for details.
pub struct PeekingTakeWhile<'a, I, P>
where I: 'a + Iterator
{
iter: &'a mut Peekable<I>,
predicate: P,
}
impl<'a, I, P> Iterator for PeekingTakeWhile<'a, I, P>
where I: Iterator,
I::Item: ::std::fmt::Debug,
P: FnMut(&<I as Iterator>::Item) -> bool
{
type Item = <I as Iterator>::Item;
fn next(&mut self) -> Option<Self::Item> {
let predicate = &mut self.predicate;
if self.iter.peek().map_or(false, |x|!(predicate)(x)) {
None
} else {
self.iter.next()
}
}
}
/// The `Iterator` extension trait that provides the `peeking_take_while`
/// method.
///
/// See the [module documentation](./index.html) for details.
pub trait PeekableExt<'a, I>: Iterator
where I: 'a + Iterator
{
/// The `Iterator` extension trait that provides the `peeking_take_while`
/// method.
///
/// See the [module documentation](./index.html) for details.
fn peeking_take_while<P>(&'a mut self, predicate: P) -> PeekingTakeWhile<'a, I, P>
where Self: Sized,
P: FnMut(&<Self as Iterator>::Item) -> bool;
}
impl<'a, I> PeekableExt<'a, I> for Peekable<I>
where I: 'a + Iterator
{
fn peeking_take_while<P>(&'a mut self, predicate: P) -> PeekingTakeWhile<I, P>
where P: FnMut(&<Self as Iterator>::Item) -> bool
{
PeekingTakeWhile {
iter: self,
predicate: predicate,
}
}
} | //! // And now we will do some other thing with the items that are greater than | random_line_split |
lib.rs | //! # `peeking_take_while`
//!
//! Provides the `peeking_take_while` iterator adaptor method.
//!
//! The `peeking_take_while` method is very similar to `take_while`, but behaves
//! differently when used with a borrowed iterator (perhaps returned by
//! `Iterator::by_ref`).
//!
//! `peeking_take_while` peeks at the next item in the iterator and runs the
//! predicate on that peeked item. This avoids consuming the first item yielded
//! by the underlying iterator for which the predicate returns `false`. On the
//! other hand, `take_while` will consume that first item for which the
//! predicate returns `false`, and it will be lost.
//!
//! ```
//! extern crate peeking_take_while;
//!
//! // Bring the `peeking_take_while` method for peekable iterators into
//! // scope.
//! use peeking_take_while::PeekableExt;
//!
//! # fn main() {
//! // Let's say we have two collections we want to iterate through: `xs` and
//! // `ys`. We want to perform one operation on all the leading contiguous
//! // elements that match some predicate, and a different thing with the rest of
//! // the elements. With the `xs`, we will use the normal `take_while`. With the
//! // `ys`, we will use `peeking_take_while`.
//!
//! let xs: Vec<u8> = (0..100).collect();
//! let ys = xs.clone();
//!
//! let mut iter_xs = xs.into_iter();
//! let mut iter_ys = ys.into_iter().peekable();
//!
//! {
//! // Let's do one thing with all the items that are less than 10.
//! # fn do_things_with<T>(_: T) {}
//!
//! let xs_less_than_ten = iter_xs.by_ref().take_while(|x| *x < 10);
//! for x in xs_less_than_ten {
//! do_things_with(x);
//! }
//!
//! let ys_less_than_ten = iter_ys.by_ref().peeking_take_while(|y| *y < 10);
//! for y in ys_less_than_ten {
//! do_things_with(y);
//! }
//! }
//!
//! // And now we will do some other thing with the items that are greater than
//! // or equal to 10.
//!
//! //...except, when using plain old `take_while` we lost 10!
//! assert_eq!(iter_xs.next(), Some(11));
//!
//! // However, when using `peeking_take_while` we did not! Great!
//! assert_eq!(iter_ys.next(), Some(10));
//! # }
//! ```
use std::iter::Peekable;
/// The iterator returned by `peeking_take_while`.
///
/// See the [module documentation](./index.html) for details.
pub struct PeekingTakeWhile<'a, I, P>
where I: 'a + Iterator
{
iter: &'a mut Peekable<I>,
predicate: P,
}
impl<'a, I, P> Iterator for PeekingTakeWhile<'a, I, P>
where I: Iterator,
I::Item: ::std::fmt::Debug,
P: FnMut(&<I as Iterator>::Item) -> bool
{
type Item = <I as Iterator>::Item;
fn next(&mut self) -> Option<Self::Item> {
let predicate = &mut self.predicate;
if self.iter.peek().map_or(false, |x|!(predicate)(x)) {
None
} else {
self.iter.next()
}
}
}
/// The `Iterator` extension trait that provides the `peeking_take_while`
/// method.
///
/// See the [module documentation](./index.html) for details.
pub trait PeekableExt<'a, I>: Iterator
where I: 'a + Iterator
{
/// The `Iterator` extension trait that provides the `peeking_take_while`
/// method.
///
/// See the [module documentation](./index.html) for details.
fn peeking_take_while<P>(&'a mut self, predicate: P) -> PeekingTakeWhile<'a, I, P>
where Self: Sized,
P: FnMut(&<Self as Iterator>::Item) -> bool;
}
impl<'a, I> PeekableExt<'a, I> for Peekable<I>
where I: 'a + Iterator
{
fn | <P>(&'a mut self, predicate: P) -> PeekingTakeWhile<I, P>
where P: FnMut(&<Self as Iterator>::Item) -> bool
{
PeekingTakeWhile {
iter: self,
predicate: predicate,
}
}
}
| peeking_take_while | identifier_name |
lib.rs | //! # `peeking_take_while`
//!
//! Provides the `peeking_take_while` iterator adaptor method.
//!
//! The `peeking_take_while` method is very similar to `take_while`, but behaves
//! differently when used with a borrowed iterator (perhaps returned by
//! `Iterator::by_ref`).
//!
//! `peeking_take_while` peeks at the next item in the iterator and runs the
//! predicate on that peeked item. This avoids consuming the first item yielded
//! by the underlying iterator for which the predicate returns `false`. On the
//! other hand, `take_while` will consume that first item for which the
//! predicate returns `false`, and it will be lost.
//!
//! ```
//! extern crate peeking_take_while;
//!
//! // Bring the `peeking_take_while` method for peekable iterators into
//! // scope.
//! use peeking_take_while::PeekableExt;
//!
//! # fn main() {
//! // Let's say we have two collections we want to iterate through: `xs` and
//! // `ys`. We want to perform one operation on all the leading contiguous
//! // elements that match some predicate, and a different thing with the rest of
//! // the elements. With the `xs`, we will use the normal `take_while`. With the
//! // `ys`, we will use `peeking_take_while`.
//!
//! let xs: Vec<u8> = (0..100).collect();
//! let ys = xs.clone();
//!
//! let mut iter_xs = xs.into_iter();
//! let mut iter_ys = ys.into_iter().peekable();
//!
//! {
//! // Let's do one thing with all the items that are less than 10.
//! # fn do_things_with<T>(_: T) {}
//!
//! let xs_less_than_ten = iter_xs.by_ref().take_while(|x| *x < 10);
//! for x in xs_less_than_ten {
//! do_things_with(x);
//! }
//!
//! let ys_less_than_ten = iter_ys.by_ref().peeking_take_while(|y| *y < 10);
//! for y in ys_less_than_ten {
//! do_things_with(y);
//! }
//! }
//!
//! // And now we will do some other thing with the items that are greater than
//! // or equal to 10.
//!
//! //...except, when using plain old `take_while` we lost 10!
//! assert_eq!(iter_xs.next(), Some(11));
//!
//! // However, when using `peeking_take_while` we did not! Great!
//! assert_eq!(iter_ys.next(), Some(10));
//! # }
//! ```
use std::iter::Peekable;
/// The iterator returned by `peeking_take_while`.
///
/// See the [module documentation](./index.html) for details.
pub struct PeekingTakeWhile<'a, I, P>
where I: 'a + Iterator
{
iter: &'a mut Peekable<I>,
predicate: P,
}
impl<'a, I, P> Iterator for PeekingTakeWhile<'a, I, P>
where I: Iterator,
I::Item: ::std::fmt::Debug,
P: FnMut(&<I as Iterator>::Item) -> bool
{
type Item = <I as Iterator>::Item;
fn next(&mut self) -> Option<Self::Item> {
let predicate = &mut self.predicate;
if self.iter.peek().map_or(false, |x|!(predicate)(x)) {
None
} else |
}
}
/// The `Iterator` extension trait that provides the `peeking_take_while`
/// method.
///
/// See the [module documentation](./index.html) for details.
pub trait PeekableExt<'a, I>: Iterator
where I: 'a + Iterator
{
/// The `Iterator` extension trait that provides the `peeking_take_while`
/// method.
///
/// See the [module documentation](./index.html) for details.
fn peeking_take_while<P>(&'a mut self, predicate: P) -> PeekingTakeWhile<'a, I, P>
where Self: Sized,
P: FnMut(&<Self as Iterator>::Item) -> bool;
}
impl<'a, I> PeekableExt<'a, I> for Peekable<I>
where I: 'a + Iterator
{
fn peeking_take_while<P>(&'a mut self, predicate: P) -> PeekingTakeWhile<I, P>
where P: FnMut(&<Self as Iterator>::Item) -> bool
{
PeekingTakeWhile {
iter: self,
predicate: predicate,
}
}
}
| {
self.iter.next()
} | conditional_block |
month.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::io::Write;
use std::str::FromStr;
use filters::filter::Filter;
use chrono::NaiveDateTime;
use anyhow::Error;
use anyhow::Result;
use resiter::AndThen;
use resiter::Filter as RFilter;
use libimagstore::store::FileLockEntry;
use libimagtimetrack::store::TimeTrackStore;
use libimagtimetrack::tag::TimeTrackingTag;
use libimagtimetrack::iter::filter::*;
use libimagtimetrack::timetracking::TimeTracking;
use libimagrt::runtime::Runtime;
pub fn month(rt: &Runtime) -> Result<()> | let (year, month) = if now.month() == 12 {
(now.year() + 1, 1)
} else {
(now.year(), now.month() + 1)
};
NaiveDate::from_ymd(year, month, 1).and_hms(0, 0, 0)
},
Some(s) => s?,
};
let tags = cmd
.values_of("tags")
.map(|ts| ts.map(String::from).map(TimeTrackingTag::from).collect::<Vec<_>>());
let start_time_filter = has_start_time_where(move |dt: &NaiveDateTime| {
start <= *dt
});
let end_time_filter = has_end_time_where(move |dt: &NaiveDateTime| {
end >= *dt
});
let tags_filter = move |fle: &FileLockEntry| {
match tags {
Some(ref tags) => has_one_of_tags(&tags).filter(fle),
None => true,
}
};
tags_filter.and(start_time_filter).and(end_time_filter)
};
rt.store()
.get_timetrackings()?
.filter_ok(|e| filter.filter(e))
.and_then_ok(|e| -> Result<_> {
debug!("Processing {:?}", e.get_location());
let tag = e.get_timetrack_tag()?;
debug!(" -> tag = {:?}", tag);
let start = e.get_start_datetime()?;
debug!(" -> start = {:?}", start);
let end = e.get_end_datetime()?;
debug!(" -> end = {:?}", end);
rt.report_touched(e.get_location())
.map_err(Error::from)
.map(|_| (tag, start, end))
})
.and_then_ok(|(tag, start, end)| {
match (start, end) {
(None, _) => writeln!(rt.stdout(), "{} has no start time.", tag),
(Some(s), None) => writeln!(rt.stdout(), "{} | {} -...", tag, s),
(Some(s), Some(e)) => writeln!(rt.stdout(), "{} | {} - {}", tag, s, e),
}.map_err(Error::from)
})
.collect::<Result<Vec<_>>>()
.map(|_| ())
}
| {
let cmd = rt.cli().subcommand().1.unwrap(); // checked in main
let filter = {
use chrono::offset::Local;
use chrono::naive::NaiveDate;
use chrono::Datelike;
let now = Local::now();
let start = match cmd.value_of("start").map(::chrono::naive::NaiveDateTime::from_str) {
None => NaiveDate::from_ymd(now.year(), now.month(), 1).and_hms(0, 0, 0),
Some(s) => s?,
};
let end = match cmd.value_of("end").map(::chrono::naive::NaiveDateTime::from_str) {
None => {
// Is it much harder to go to the last second of the current month than to the first
// second of the next month, right? | identifier_body |
month.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::io::Write;
use std::str::FromStr;
use filters::filter::Filter;
use chrono::NaiveDateTime;
use anyhow::Error;
use anyhow::Result;
use resiter::AndThen;
use resiter::Filter as RFilter;
use libimagstore::store::FileLockEntry;
use libimagtimetrack::store::TimeTrackStore;
use libimagtimetrack::tag::TimeTrackingTag;
use libimagtimetrack::iter::filter::*;
use libimagtimetrack::timetracking::TimeTracking;
use libimagrt::runtime::Runtime;
pub fn | (rt: &Runtime) -> Result<()> {
let cmd = rt.cli().subcommand().1.unwrap(); // checked in main
let filter = {
use chrono::offset::Local;
use chrono::naive::NaiveDate;
use chrono::Datelike;
let now = Local::now();
let start = match cmd.value_of("start").map(::chrono::naive::NaiveDateTime::from_str) {
None => NaiveDate::from_ymd(now.year(), now.month(), 1).and_hms(0, 0, 0),
Some(s) => s?,
};
let end = match cmd.value_of("end").map(::chrono::naive::NaiveDateTime::from_str) {
None => {
// Is it much harder to go to the last second of the current month than to the first
// second of the next month, right?
let (year, month) = if now.month() == 12 {
(now.year() + 1, 1)
} else {
(now.year(), now.month() + 1)
};
NaiveDate::from_ymd(year, month, 1).and_hms(0, 0, 0)
},
Some(s) => s?,
};
let tags = cmd
.values_of("tags")
.map(|ts| ts.map(String::from).map(TimeTrackingTag::from).collect::<Vec<_>>());
let start_time_filter = has_start_time_where(move |dt: &NaiveDateTime| {
start <= *dt
});
let end_time_filter = has_end_time_where(move |dt: &NaiveDateTime| {
end >= *dt
});
let tags_filter = move |fle: &FileLockEntry| {
match tags {
Some(ref tags) => has_one_of_tags(&tags).filter(fle),
None => true,
}
};
tags_filter.and(start_time_filter).and(end_time_filter)
};
rt.store()
.get_timetrackings()?
.filter_ok(|e| filter.filter(e))
.and_then_ok(|e| -> Result<_> {
debug!("Processing {:?}", e.get_location());
let tag = e.get_timetrack_tag()?;
debug!(" -> tag = {:?}", tag);
let start = e.get_start_datetime()?;
debug!(" -> start = {:?}", start);
let end = e.get_end_datetime()?;
debug!(" -> end = {:?}", end);
rt.report_touched(e.get_location())
.map_err(Error::from)
.map(|_| (tag, start, end))
})
.and_then_ok(|(tag, start, end)| {
match (start, end) {
(None, _) => writeln!(rt.stdout(), "{} has no start time.", tag),
(Some(s), None) => writeln!(rt.stdout(), "{} | {} -...", tag, s),
(Some(s), Some(e)) => writeln!(rt.stdout(), "{} | {} - {}", tag, s, e),
}.map_err(Error::from)
})
.collect::<Result<Vec<_>>>()
.map(|_| ())
}
| month | identifier_name |
month.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::io::Write;
use std::str::FromStr;
use filters::filter::Filter;
use chrono::NaiveDateTime;
use anyhow::Error;
use anyhow::Result;
use resiter::AndThen;
use resiter::Filter as RFilter;
use libimagstore::store::FileLockEntry;
use libimagtimetrack::store::TimeTrackStore;
use libimagtimetrack::tag::TimeTrackingTag;
use libimagtimetrack::iter::filter::*;
use libimagtimetrack::timetracking::TimeTracking;
use libimagrt::runtime::Runtime;
pub fn month(rt: &Runtime) -> Result<()> {
let cmd = rt.cli().subcommand().1.unwrap(); // checked in main
let filter = {
use chrono::offset::Local;
use chrono::naive::NaiveDate;
use chrono::Datelike;
let now = Local::now();
let start = match cmd.value_of("start").map(::chrono::naive::NaiveDateTime::from_str) {
None => NaiveDate::from_ymd(now.year(), now.month(), 1).and_hms(0, 0, 0),
Some(s) => s?,
};
let end = match cmd.value_of("end").map(::chrono::naive::NaiveDateTime::from_str) {
None => {
// Is it much harder to go to the last second of the current month than to the first
// second of the next month, right?
let (year, month) = if now.month() == 12 {
(now.year() + 1, 1)
} else {
(now.year(), now.month() + 1)
};
NaiveDate::from_ymd(year, month, 1).and_hms(0, 0, 0)
},
Some(s) => s?,
};
let tags = cmd
.values_of("tags")
.map(|ts| ts.map(String::from).map(TimeTrackingTag::from).collect::<Vec<_>>());
let start_time_filter = has_start_time_where(move |dt: &NaiveDateTime| {
start <= *dt
});
let end_time_filter = has_end_time_where(move |dt: &NaiveDateTime| {
end >= *dt
});
let tags_filter = move |fle: &FileLockEntry| {
match tags {
Some(ref tags) => has_one_of_tags(&tags).filter(fle),
None => true,
}
};
tags_filter.and(start_time_filter).and(end_time_filter)
};
rt.store()
.get_timetrackings()?
.filter_ok(|e| filter.filter(e))
.and_then_ok(|e| -> Result<_> { | debug!(" -> tag = {:?}", tag);
let start = e.get_start_datetime()?;
debug!(" -> start = {:?}", start);
let end = e.get_end_datetime()?;
debug!(" -> end = {:?}", end);
rt.report_touched(e.get_location())
.map_err(Error::from)
.map(|_| (tag, start, end))
})
.and_then_ok(|(tag, start, end)| {
match (start, end) {
(None, _) => writeln!(rt.stdout(), "{} has no start time.", tag),
(Some(s), None) => writeln!(rt.stdout(), "{} | {} -...", tag, s),
(Some(s), Some(e)) => writeln!(rt.stdout(), "{} | {} - {}", tag, s, e),
}.map_err(Error::from)
})
.collect::<Result<Vec<_>>>()
.map(|_| ())
} | debug!("Processing {:?}", e.get_location());
let tag = e.get_timetrack_tag()?; | random_line_split |
move_.rs |
use crate::direction::Direction;
/// This structure contains everything needed to do or undo a Sokoban move.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
/// Was a crate moved?
pub moves_crate: bool,
/// Where was the move directed?
pub direction: Direction,
}
impl Move {
pub fn new(direction: Direction, moves_crate: bool) -> Self {
Move {
moves_crate,
direction,
}
}
/// Describe a move using one character signifying its direction. The character is upper case
/// if and only if `self.moves_crate` is true.
pub fn to_char(&self) -> char {
let mut c = match self.direction {
Direction::Left => 'l',
Direction::Right => 'r',
Direction::Up => 'u',
Direction::Down => 'd',
};
if self.moves_crate {
c.make_ascii_uppercase();
}
c
}
}
/// Parse a string representation of moves.
pub fn parse(s: &str) -> Result<Vec<Move>, char> {
s.chars().map(Move::try_from).collect::<Result<Vec<_>, _>>()
}
impl fmt::Display for Move {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.to_char())
}
}
impl TryFrom<char> for Move {
type Error = char;
fn try_from(c: char) -> Result<Move, char> {
use crate::Direction::*;
let dir = match c {
'l' | 'L' => Left,
'r' | 'R' => Right,
'u' | 'U' => Up,
'd' | 'D' => Down,
_ => return Err(c),
};
let push = c.is_ascii_uppercase();
Ok(Move::new(dir, push))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn to_from() {
for &dir in &crate::direction::DIRECTIONS {
let mv = Move::new(dir, true);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
let mv = Move::new(dir, false);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
}
}
#[test]
fn invalid_char() {
for chr in "abcefghijkmnopqstvwxyz".chars() {
assert!(Move::try_from(chr).is_err());
}
}
#[test]
fn parse_str() {
let s = "UldrdddDddlLrrRRuLulLLUUdrdlduUDLR";
let moves = parse(s).unwrap();
let s2: String = moves.into_iter().map(|x| x.to_char()).collect();
assert_eq!(s, s2);
}
} | use std::convert::TryFrom;
use std::fmt; | random_line_split |
|
move_.rs | use std::convert::TryFrom;
use std::fmt;
use crate::direction::Direction;
/// This structure contains everything needed to do or undo a Sokoban move.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
/// Was a crate moved?
pub moves_crate: bool,
/// Where was the move directed?
pub direction: Direction,
}
impl Move {
pub fn new(direction: Direction, moves_crate: bool) -> Self {
Move {
moves_crate,
direction,
}
}
/// Describe a move using one character signifying its direction. The character is upper case
/// if and only if `self.moves_crate` is true.
pub fn to_char(&self) -> char {
let mut c = match self.direction {
Direction::Left => 'l',
Direction::Right => 'r',
Direction::Up => 'u',
Direction::Down => 'd',
};
if self.moves_crate |
c
}
}
/// Parse a string representation of moves.
pub fn parse(s: &str) -> Result<Vec<Move>, char> {
s.chars().map(Move::try_from).collect::<Result<Vec<_>, _>>()
}
impl fmt::Display for Move {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.to_char())
}
}
impl TryFrom<char> for Move {
type Error = char;
fn try_from(c: char) -> Result<Move, char> {
use crate::Direction::*;
let dir = match c {
'l' | 'L' => Left,
'r' | 'R' => Right,
'u' | 'U' => Up,
'd' | 'D' => Down,
_ => return Err(c),
};
let push = c.is_ascii_uppercase();
Ok(Move::new(dir, push))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn to_from() {
for &dir in &crate::direction::DIRECTIONS {
let mv = Move::new(dir, true);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
let mv = Move::new(dir, false);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
}
}
#[test]
fn invalid_char() {
for chr in "abcefghijkmnopqstvwxyz".chars() {
assert!(Move::try_from(chr).is_err());
}
}
#[test]
fn parse_str() {
let s = "UldrdddDddlLrrRRuLulLLUUdrdlduUDLR";
let moves = parse(s).unwrap();
let s2: String = moves.into_iter().map(|x| x.to_char()).collect();
assert_eq!(s, s2);
}
}
| {
c.make_ascii_uppercase();
} | conditional_block |
move_.rs | use std::convert::TryFrom;
use std::fmt;
use crate::direction::Direction;
/// This structure contains everything needed to do or undo a Sokoban move.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
/// Was a crate moved?
pub moves_crate: bool,
/// Where was the move directed?
pub direction: Direction,
}
impl Move {
pub fn new(direction: Direction, moves_crate: bool) -> Self {
Move {
moves_crate,
direction,
}
}
/// Describe a move using one character signifying its direction. The character is upper case
/// if and only if `self.moves_crate` is true.
pub fn | (&self) -> char {
let mut c = match self.direction {
Direction::Left => 'l',
Direction::Right => 'r',
Direction::Up => 'u',
Direction::Down => 'd',
};
if self.moves_crate {
c.make_ascii_uppercase();
}
c
}
}
/// Parse a string representation of moves.
pub fn parse(s: &str) -> Result<Vec<Move>, char> {
s.chars().map(Move::try_from).collect::<Result<Vec<_>, _>>()
}
impl fmt::Display for Move {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.to_char())
}
}
impl TryFrom<char> for Move {
type Error = char;
fn try_from(c: char) -> Result<Move, char> {
use crate::Direction::*;
let dir = match c {
'l' | 'L' => Left,
'r' | 'R' => Right,
'u' | 'U' => Up,
'd' | 'D' => Down,
_ => return Err(c),
};
let push = c.is_ascii_uppercase();
Ok(Move::new(dir, push))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn to_from() {
for &dir in &crate::direction::DIRECTIONS {
let mv = Move::new(dir, true);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
let mv = Move::new(dir, false);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
}
}
#[test]
fn invalid_char() {
for chr in "abcefghijkmnopqstvwxyz".chars() {
assert!(Move::try_from(chr).is_err());
}
}
#[test]
fn parse_str() {
let s = "UldrdddDddlLrrRRuLulLLUUdrdlduUDLR";
let moves = parse(s).unwrap();
let s2: String = moves.into_iter().map(|x| x.to_char()).collect();
assert_eq!(s, s2);
}
}
| to_char | identifier_name |
move_.rs | use std::convert::TryFrom;
use std::fmt;
use crate::direction::Direction;
/// This structure contains everything needed to do or undo a Sokoban move.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Move {
/// Was a crate moved?
pub moves_crate: bool,
/// Where was the move directed?
pub direction: Direction,
}
impl Move {
pub fn new(direction: Direction, moves_crate: bool) -> Self {
Move {
moves_crate,
direction,
}
}
/// Describe a move using one character signifying its direction. The character is upper case
/// if and only if `self.moves_crate` is true.
pub fn to_char(&self) -> char {
let mut c = match self.direction {
Direction::Left => 'l',
Direction::Right => 'r',
Direction::Up => 'u',
Direction::Down => 'd',
};
if self.moves_crate {
c.make_ascii_uppercase();
}
c
}
}
/// Parse a string representation of moves.
pub fn parse(s: &str) -> Result<Vec<Move>, char> {
s.chars().map(Move::try_from).collect::<Result<Vec<_>, _>>()
}
impl fmt::Display for Move {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result |
}
impl TryFrom<char> for Move {
type Error = char;
fn try_from(c: char) -> Result<Move, char> {
use crate::Direction::*;
let dir = match c {
'l' | 'L' => Left,
'r' | 'R' => Right,
'u' | 'U' => Up,
'd' | 'D' => Down,
_ => return Err(c),
};
let push = c.is_ascii_uppercase();
Ok(Move::new(dir, push))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn to_from() {
for &dir in &crate::direction::DIRECTIONS {
let mv = Move::new(dir, true);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
let mv = Move::new(dir, false);
assert_eq!(Ok(mv.clone()), Move::try_from(mv.to_char()));
}
}
#[test]
fn invalid_char() {
for chr in "abcefghijkmnopqstvwxyz".chars() {
assert!(Move::try_from(chr).is_err());
}
}
#[test]
fn parse_str() {
let s = "UldrdddDddlLrrRRuLulLLUUdrdlduUDLR";
let moves = parse(s).unwrap();
let s2: String = moves.into_iter().map(|x| x.to_char()).collect();
assert_eq!(s, s2);
}
}
| {
write!(f, "{}", self.to_char())
} | identifier_body |
100_doors.rs | // Implements http://rosettacode.org/wiki/100_doors
use std::num::Float;
use std::iter::Map;
use std::ops::Range;
type DoorIter = Map<Range<u32>, fn(u32) -> DoorState>;
#[derive(Debug, PartialEq)]
enum DoorState { Open, Closed, }
// This is an example of returning an iterator, this allows the caller to
// choose if they want to allocate or just process as a stream.
fn calculate_doors() -> DoorIter {
fn door_status(door_number: u32) -> DoorState {
let x = (door_number as f64).sqrt();
if x == x.round() { DoorState::Open } else { DoorState::Closed }
}
(1u32..101).map(door_status as fn(u32) -> DoorState)
}
#[cfg(not(test))]
fn main() {
let doors = calculate_doors();
for (i, x) in doors.enumerate() { println!("Door {} is {:?}", i + 1, x); }
}
#[test]
fn solution() | {
let doors = calculate_doors().collect::<Vec<DoorState>>();
// test that the doors with index corresponding to
// a perfect square are now open
for i in 1..11 {
assert_eq!(doors[i*i - 1], DoorState::Open);
}
} | identifier_body |
|
100_doors.rs | // Implements http://rosettacode.org/wiki/100_doors
use std::num::Float;
use std::iter::Map;
use std::ops::Range;
type DoorIter = Map<Range<u32>, fn(u32) -> DoorState>;
#[derive(Debug, PartialEq)]
enum DoorState { Open, Closed, }
// This is an example of returning an iterator, this allows the caller to
// choose if they want to allocate or just process as a stream.
fn calculate_doors() -> DoorIter {
fn door_status(door_number: u32) -> DoorState {
let x = (door_number as f64).sqrt();
if x == x.round() { DoorState::Open } else |
}
(1u32..101).map(door_status as fn(u32) -> DoorState)
}
#[cfg(not(test))]
fn main() {
let doors = calculate_doors();
for (i, x) in doors.enumerate() { println!("Door {} is {:?}", i + 1, x); }
}
#[test]
fn solution() {
let doors = calculate_doors().collect::<Vec<DoorState>>();
// test that the doors with index corresponding to
// a perfect square are now open
for i in 1..11 {
assert_eq!(doors[i*i - 1], DoorState::Open);
}
}
| { DoorState::Closed } | conditional_block |
100_doors.rs | // Implements http://rosettacode.org/wiki/100_doors
use std::num::Float;
use std::iter::Map;
use std::ops::Range;
type DoorIter = Map<Range<u32>, fn(u32) -> DoorState>;
#[derive(Debug, PartialEq)]
enum DoorState { Open, Closed, }
// This is an example of returning an iterator, this allows the caller to
// choose if they want to allocate or just process as a stream.
fn | () -> DoorIter {
fn door_status(door_number: u32) -> DoorState {
let x = (door_number as f64).sqrt();
if x == x.round() { DoorState::Open } else { DoorState::Closed }
}
(1u32..101).map(door_status as fn(u32) -> DoorState)
}
#[cfg(not(test))]
fn main() {
let doors = calculate_doors();
for (i, x) in doors.enumerate() { println!("Door {} is {:?}", i + 1, x); }
}
#[test]
fn solution() {
let doors = calculate_doors().collect::<Vec<DoorState>>();
// test that the doors with index corresponding to
// a perfect square are now open
for i in 1..11 {
assert_eq!(doors[i*i - 1], DoorState::Open);
}
}
| calculate_doors | identifier_name |
100_doors.rs | // Implements http://rosettacode.org/wiki/100_doors |
#[derive(Debug, PartialEq)]
enum DoorState { Open, Closed, }
// This is an example of returning an iterator, this allows the caller to
// choose if they want to allocate or just process as a stream.
fn calculate_doors() -> DoorIter {
fn door_status(door_number: u32) -> DoorState {
let x = (door_number as f64).sqrt();
if x == x.round() { DoorState::Open } else { DoorState::Closed }
}
(1u32..101).map(door_status as fn(u32) -> DoorState)
}
#[cfg(not(test))]
fn main() {
let doors = calculate_doors();
for (i, x) in doors.enumerate() { println!("Door {} is {:?}", i + 1, x); }
}
#[test]
fn solution() {
let doors = calculate_doors().collect::<Vec<DoorState>>();
// test that the doors with index corresponding to
// a perfect square are now open
for i in 1..11 {
assert_eq!(doors[i*i - 1], DoorState::Open);
}
} | use std::num::Float;
use std::iter::Map;
use std::ops::Range;
type DoorIter = Map<Range<u32>, fn(u32) -> DoorState>; | random_line_split |
attr-stmt-expr.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:attr-stmt-expr.rs
#![feature(proc_macro_hygiene)]
extern crate attr_stmt_expr;
use attr_stmt_expr::{expect_let, expect_print_stmt, expect_expr, expect_print_expr};
fn | (string: &'static str) {
// macros are handled a bit differently
#[expect_print_expr]
//~^ ERROR attributes on expressions are experimental
//~| HELP add #![feature(stmt_expr_attributes)] to the crate attributes to enable
println!("{}", string)
}
fn main() {
#[expect_let]
let string = "Hello, world!";
#[expect_print_stmt]
println!("{}", string);
#[expect_expr]
//~^ ERROR attributes on expressions are experimental
//~| HELP add #![feature(stmt_expr_attributes)] to the crate attributes to enable
print_str("string")
}
| print_str | identifier_name |
attr-stmt-expr.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:attr-stmt-expr.rs
#![feature(proc_macro_hygiene)]
extern crate attr_stmt_expr;
use attr_stmt_expr::{expect_let, expect_print_stmt, expect_expr, expect_print_expr};
fn print_str(string: &'static str) {
// macros are handled a bit differently | }
fn main() {
#[expect_let]
let string = "Hello, world!";
#[expect_print_stmt]
println!("{}", string);
#[expect_expr]
//~^ ERROR attributes on expressions are experimental
//~| HELP add #![feature(stmt_expr_attributes)] to the crate attributes to enable
print_str("string")
} | #[expect_print_expr]
//~^ ERROR attributes on expressions are experimental
//~| HELP add #![feature(stmt_expr_attributes)] to the crate attributes to enable
println!("{}", string) | random_line_split |
attr-stmt-expr.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:attr-stmt-expr.rs
#![feature(proc_macro_hygiene)]
extern crate attr_stmt_expr;
use attr_stmt_expr::{expect_let, expect_print_stmt, expect_expr, expect_print_expr};
fn print_str(string: &'static str) {
// macros are handled a bit differently
#[expect_print_expr]
//~^ ERROR attributes on expressions are experimental
//~| HELP add #![feature(stmt_expr_attributes)] to the crate attributes to enable
println!("{}", string)
}
fn main() | {
#[expect_let]
let string = "Hello, world!";
#[expect_print_stmt]
println!("{}", string);
#[expect_expr]
//~^ ERROR attributes on expressions are experimental
//~| HELP add #![feature(stmt_expr_attributes)] to the crate attributes to enable
print_str("string")
} | identifier_body |
|
buttons-input.rs | /*
* Copyright (c) 2018 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
use gtk::{
EditableSignals,
Inhibit,
prelude::ButtonExt,
prelude::EntryExt,
prelude::LabelExt,
prelude::OrientableExt,
prelude::WidgetExt,
};
use gtk::Orientation::{Horizontal, Vertical};
use relm::{Relm, Widget};
use relm_derive::{Msg, widget};
use self::Msg::*;
pub struct Model {
left_text: String,
relm: Relm<Win>,
right_text: String,
text: String,
}
#[derive(Clone, Msg)]
pub enum Msg {
Cancel,
Concat,
DataAvailable(String),
DataCleared,
LeftChanged(String),
RightChanged(String),
Quit,
}
#[widget]
impl Widget for Win {
fn model(relm: &Relm<Self>, (): ()) -> Model {
Model {
left_text: String::new(),
right_text: String::new(),
relm: relm.clone(),
text: String::new(),
}
}
fn update(&mut self, event: Msg) {
match event {
Cancel => {
self.model.left_text = String::new();
self.model.right_text = String::new();
self.model.text = String::new();
self.model.relm.stream().emit(DataCleared);
},
Concat => | ,
// To be listened to by the user.
DataAvailable(_) | DataCleared => (),
LeftChanged(text) => self.model.left_text = text,
RightChanged(text) => self.model.right_text = text,
Quit => gtk::main_quit(),
}
}
view! {
#[name="window"]
gtk::Window {
gtk::Box {
gtk::Box {
#[name="left_entry"]
gtk::Entry {
text: &self.model.left_text,
changed(entry) => LeftChanged(entry.text().to_string()),
},
#[name="right_entry"]
gtk::Entry {
text: &self.model.right_text,
changed(entry) => RightChanged(entry.text().to_string()),
},
orientation: Horizontal,
},
gtk::ButtonBox {
#[name="concat_button"]
gtk::Button {
clicked => Concat,
label: "Concat",
},
#[name="cancel_button"]
gtk::Button {
clicked => Cancel,
label: "Cancel",
},
orientation: Horizontal,
},
orientation: Vertical,
#[name="label"]
gtk::Label {
label: &self.model.text,
},
},
delete_event(_, _) => (Quit, Inhibit(false)),
}
}
}
#[cfg(test)]
mod tests {
use gdk::keys::constants as key;
use gtk::prelude::{
EntryExt,
GtkWindowExt,
LabelExt,
WidgetExt,
};
use gtk_test::{
assert_text,
focus,
};
use relm_test::{
enter_key,
enter_keys,
relm_observer_new,
relm_observer_wait,
};
use crate::Msg::{DataAvailable, DataCleared};
use crate::Win;
#[test]
fn label_change() {
let (component, _, widgets) = relm::init_test::<Win>(()).expect("init_test failed");
let cancel_button = &widgets.cancel_button;
let concat_button = &widgets.concat_button;
let label = &widgets.label;
let left_entry = &widgets.left_entry;
let right_entry = &widgets.right_entry;
let window = &widgets.window;
let available_observer = relm_observer_new!(component, DataAvailable(_));
let cleared_observer = relm_observer_new!(component, DataCleared);
assert_text!(label, "");
enter_keys(&window.focused_widget().expect("focused widget"), "left");
enter_key(window, key::Tab);
assert!(right_entry.has_focus());
enter_keys(&window.focused_widget().expect("focused widget"), "right");
enter_key(window, key::Tab);
assert!(concat_button.has_focus());
enter_key(
&window.focused_widget().expect("focused widget"),
key::space,
);
assert_text!(label, "leftright");
enter_key(window, key::Tab);
assert!(cancel_button.has_focus());
enter_key(
&window.focused_widget().expect("focused widget"),
key::space,
);
assert_text!(label, "");
assert_text!(left_entry, "");
assert_text!(right_entry, "");
focus(left_entry);
assert!(left_entry.has_focus());
focus(right_entry);
assert!(right_entry.has_focus());
relm_observer_wait!(let DataAvailable(text) = available_observer);
assert_eq!(text, "leftright");
relm_observer_wait!(let DataCleared = cleared_observer);
}
}
| {
self.model.text = format!("{}{}", self.model.left_text, self.model.right_text);
self.model.relm.stream().emit(DataAvailable(self.model.text.clone()));
} | conditional_block |
buttons-input.rs | /*
* Copyright (c) 2018 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
use gtk::{
EditableSignals,
Inhibit,
prelude::ButtonExt,
prelude::EntryExt,
prelude::LabelExt,
prelude::OrientableExt,
prelude::WidgetExt,
};
use gtk::Orientation::{Horizontal, Vertical};
use relm::{Relm, Widget};
use relm_derive::{Msg, widget};
use self::Msg::*;
pub struct Model {
left_text: String,
relm: Relm<Win>,
right_text: String,
text: String,
}
#[derive(Clone, Msg)]
pub enum Msg {
Cancel,
Concat,
DataAvailable(String),
DataCleared,
LeftChanged(String),
RightChanged(String),
Quit,
}
#[widget]
impl Widget for Win {
fn model(relm: &Relm<Self>, (): ()) -> Model {
Model {
left_text: String::new(),
right_text: String::new(),
relm: relm.clone(),
text: String::new(),
}
}
fn update(&mut self, event: Msg) {
match event {
Cancel => {
self.model.left_text = String::new();
self.model.right_text = String::new();
self.model.text = String::new();
self.model.relm.stream().emit(DataCleared);
},
Concat => {
self.model.text = format!("{}{}", self.model.left_text, self.model.right_text);
self.model.relm.stream().emit(DataAvailable(self.model.text.clone()));
},
// To be listened to by the user.
DataAvailable(_) | DataCleared => (),
LeftChanged(text) => self.model.left_text = text,
RightChanged(text) => self.model.right_text = text,
Quit => gtk::main_quit(),
}
}
view! {
#[name="window"]
gtk::Window {
gtk::Box {
gtk::Box {
#[name="left_entry"]
gtk::Entry {
text: &self.model.left_text,
changed(entry) => LeftChanged(entry.text().to_string()),
},
#[name="right_entry"]
gtk::Entry {
text: &self.model.right_text,
changed(entry) => RightChanged(entry.text().to_string()),
},
orientation: Horizontal,
},
gtk::ButtonBox {
#[name="concat_button"]
gtk::Button {
clicked => Concat,
label: "Concat",
},
#[name="cancel_button"]
gtk::Button {
clicked => Cancel,
label: "Cancel",
},
orientation: Horizontal,
},
orientation: Vertical,
#[name="label"]
gtk::Label {
label: &self.model.text,
},
},
delete_event(_, _) => (Quit, Inhibit(false)),
}
}
}
#[cfg(test)]
mod tests {
use gdk::keys::constants as key;
use gtk::prelude::{
EntryExt,
GtkWindowExt,
LabelExt,
WidgetExt,
};
use gtk_test::{
assert_text,
focus,
};
use relm_test::{
enter_key,
enter_keys,
relm_observer_new,
relm_observer_wait,
};
use crate::Msg::{DataAvailable, DataCleared};
use crate::Win;
#[test]
fn | () {
let (component, _, widgets) = relm::init_test::<Win>(()).expect("init_test failed");
let cancel_button = &widgets.cancel_button;
let concat_button = &widgets.concat_button;
let label = &widgets.label;
let left_entry = &widgets.left_entry;
let right_entry = &widgets.right_entry;
let window = &widgets.window;
let available_observer = relm_observer_new!(component, DataAvailable(_));
let cleared_observer = relm_observer_new!(component, DataCleared);
assert_text!(label, "");
enter_keys(&window.focused_widget().expect("focused widget"), "left");
enter_key(window, key::Tab);
assert!(right_entry.has_focus());
enter_keys(&window.focused_widget().expect("focused widget"), "right");
enter_key(window, key::Tab);
assert!(concat_button.has_focus());
enter_key(
&window.focused_widget().expect("focused widget"),
key::space,
);
assert_text!(label, "leftright");
enter_key(window, key::Tab);
assert!(cancel_button.has_focus());
enter_key(
&window.focused_widget().expect("focused widget"),
key::space,
);
assert_text!(label, "");
assert_text!(left_entry, "");
assert_text!(right_entry, "");
focus(left_entry);
assert!(left_entry.has_focus());
focus(right_entry);
assert!(right_entry.has_focus());
relm_observer_wait!(let DataAvailable(text) = available_observer);
assert_eq!(text, "leftright");
relm_observer_wait!(let DataCleared = cleared_observer);
}
}
| label_change | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.