file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
types.rs | use game::{Position, Move, Score, NumPlies, NumMoves};
#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Debug)]
pub struct NumNodes(pub u64);
#[derive(Clone, Debug)]
pub struct State {
pub pos: Position,
pub prev_pos: Option<Position>,
pub prev_move: Option<Move>,
pub param: Param,
}
#[derive(Clone, Debug)]
pub struct Param {
pub ponder: bool,
pub search_moves: Option<Vec<Move>>,
pub depth: Option<NumPlies>,
pub nodes: Option<NumNodes>, | pub hash_size: usize,
}
impl Param {
pub fn new(hash_size: usize) -> Self {
Param {
ponder: false,
search_moves: None,
depth: None,
nodes: None,
mate: None,
hash_size: hash_size,
}
}
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum Cmd {
SetDebug(bool),
PonderHit,
Stop,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct BestMove(pub Move, pub Option<Move>);
#[derive(Clone, Debug)]
pub struct Report {
pub data: Data,
pub score: Score,
pub pv: Vec<Move>,
}
#[derive(Clone, Debug)]
pub struct Data {
pub nodes: NumNodes,
pub depth: NumPlies,
}
// TODO put actual data here
#[derive(Clone, Debug)]
pub struct InnerData {
pub nodes: NumNodes,
}
impl InnerData {
pub fn one_node() -> InnerData { InnerData { nodes: NumNodes(1) } }
pub fn combine(self, other: InnerData) -> InnerData {
InnerData { nodes: NumNodes(self.nodes.0 + other.nodes.0) }
}
pub fn increment(self) -> InnerData {
InnerData { nodes: NumNodes(self.nodes.0 + 1) }
}
} | pub mate: Option<NumMoves>, | random_line_split |
progressevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::ProgressEventBinding;
use crate::dom::bindings::codegen::Bindings::ProgressEventBinding::ProgressEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct ProgressEvent {
event: Event,
length_computable: bool,
loaded: u64,
total: u64,
}
impl ProgressEvent {
fn new_inherited(length_computable: bool, loaded: u64, total: u64) -> ProgressEvent {
ProgressEvent {
event: Event::new_inherited(),
length_computable: length_computable,
loaded: loaded,
total: total,
}
}
pub fn new(
global: &GlobalScope,
type_: Atom,
can_bubble: EventBubbles,
cancelable: EventCancelable,
length_computable: bool,
loaded: u64,
total: u64,
) -> DomRoot<ProgressEvent> {
let ev = reflect_dom_object(
Box::new(ProgressEvent::new_inherited(
length_computable,
loaded,
total,
)),
global,
ProgressEventBinding::Wrap,
);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(can_bubble), bool::from(cancelable));
}
ev
}
pub fn Constructor(
global: &GlobalScope,
type_: DOMString,
init: &ProgressEventBinding::ProgressEventInit,
) -> Fallible<DomRoot<ProgressEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let ev = ProgressEvent::new(
global,
Atom::from(type_),
bubbles,
cancelable,
init.lengthComputable,
init.loaded,
init.total,
);
Ok(ev)
}
}
impl ProgressEventMethods for ProgressEvent {
// https://xhr.spec.whatwg.org/#dom-progressevent-lengthcomputable
fn LengthComputable(&self) -> bool {
self.length_computable
}
// https://xhr.spec.whatwg.org/#dom-progressevent-loaded
fn | (&self) -> u64 {
self.loaded
}
// https://xhr.spec.whatwg.org/#dom-progressevent-total
fn Total(&self) -> u64 {
self.total
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| Loaded | identifier_name |
progressevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::ProgressEventBinding;
use crate::dom::bindings::codegen::Bindings::ProgressEventBinding::ProgressEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct ProgressEvent {
event: Event,
length_computable: bool,
loaded: u64,
total: u64,
}
impl ProgressEvent {
fn new_inherited(length_computable: bool, loaded: u64, total: u64) -> ProgressEvent {
ProgressEvent {
event: Event::new_inherited(),
length_computable: length_computable,
loaded: loaded,
total: total, | type_: Atom,
can_bubble: EventBubbles,
cancelable: EventCancelable,
length_computable: bool,
loaded: u64,
total: u64,
) -> DomRoot<ProgressEvent> {
let ev = reflect_dom_object(
Box::new(ProgressEvent::new_inherited(
length_computable,
loaded,
total,
)),
global,
ProgressEventBinding::Wrap,
);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(can_bubble), bool::from(cancelable));
}
ev
}
pub fn Constructor(
global: &GlobalScope,
type_: DOMString,
init: &ProgressEventBinding::ProgressEventInit,
) -> Fallible<DomRoot<ProgressEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let ev = ProgressEvent::new(
global,
Atom::from(type_),
bubbles,
cancelable,
init.lengthComputable,
init.loaded,
init.total,
);
Ok(ev)
}
}
impl ProgressEventMethods for ProgressEvent {
// https://xhr.spec.whatwg.org/#dom-progressevent-lengthcomputable
fn LengthComputable(&self) -> bool {
self.length_computable
}
// https://xhr.spec.whatwg.org/#dom-progressevent-loaded
fn Loaded(&self) -> u64 {
self.loaded
}
// https://xhr.spec.whatwg.org/#dom-progressevent-total
fn Total(&self) -> u64 {
self.total
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
} | }
}
pub fn new(
global: &GlobalScope, | random_line_split |
assembunny.rs | use std::io::prelude::*;
use std::fs::File;
struct Registers {
a: i32,
b: i32,
c: i32,
d: i32
}
impl Registers {
fn new() -> Registers {
Registers{a: 0, b: 0, c: 0, d: 0}
}
fn get(&self, name: &str) -> i32 |
fn inc(&mut self, name: &str) {
let curr_val = self.get(name);
self.set(name, curr_val + 1);
}
fn dec(&mut self, name: &str) {
let curr_val = self.get(name);
self.set(name, curr_val - 1);
}
fn set(&mut self, name: &str, val: i32) {
match name {
"a" => self.a = val,
"b" => self.b = val,
"c" => self.c = val,
"d" => self.d = val,
_ => panic!("invalid register {}!", name),
}
}
}
fn main() {
let mut f = File::open("data.txt").expect("unable to open file");
let mut data = String::new();
f.read_to_string(&mut data).expect("unable to read file");
let mut instructions: Vec<&str> = data.split("\n").collect();
instructions.pop();
let mut regs = Registers::new();
let mut current_ins: i32 = 0;
loop {
if current_ins < 0 || current_ins as usize >= instructions.len() {
break;
}
let (op, args_str) = instructions[current_ins as usize].split_at(3);
let args: Vec<&str> = args_str.trim().split(" ").collect();
match op {
"cpy" => match args[0].parse::<i32>() {
Ok(n) => regs.set(args[1], n),
Err(_) => {
let val = regs.get(args[0]);
regs.set(args[1], val);
},
},
"inc" => regs.inc(args[0]),
"dec" => regs.dec(args[0]),
"jnz" => if match args[0].parse::<i32>() {
Ok(n) => n!= 0,
Err(_) => regs.get(args[0])!= 0,
} {
current_ins += args[1].parse::<i32>().unwrap();
continue;
},
_ => (),
}
current_ins += 1;
}
println!("{}", regs.a);
}
| {
match name {
"a" => self.a,
"b" => self.b,
"c" => self.c,
"d" => self.d,
_ => panic!("invalid register {}!", name),
}
} | identifier_body |
assembunny.rs | use std::io::prelude::*;
use std::fs::File;
struct Registers {
a: i32,
b: i32,
c: i32,
d: i32
}
impl Registers {
fn new() -> Registers {
Registers{a: 0, b: 0, c: 0, d: 0}
}
fn get(&self, name: &str) -> i32 {
match name {
"a" => self.a,
"b" => self.b,
"c" => self.c,
"d" => self.d,
_ => panic!("invalid register {}!", name),
}
}
fn | (&mut self, name: &str) {
let curr_val = self.get(name);
self.set(name, curr_val + 1);
}
fn dec(&mut self, name: &str) {
let curr_val = self.get(name);
self.set(name, curr_val - 1);
}
fn set(&mut self, name: &str, val: i32) {
match name {
"a" => self.a = val,
"b" => self.b = val,
"c" => self.c = val,
"d" => self.d = val,
_ => panic!("invalid register {}!", name),
}
}
}
fn main() {
let mut f = File::open("data.txt").expect("unable to open file");
let mut data = String::new();
f.read_to_string(&mut data).expect("unable to read file");
let mut instructions: Vec<&str> = data.split("\n").collect();
instructions.pop();
let mut regs = Registers::new();
let mut current_ins: i32 = 0;
loop {
if current_ins < 0 || current_ins as usize >= instructions.len() {
break;
}
let (op, args_str) = instructions[current_ins as usize].split_at(3);
let args: Vec<&str> = args_str.trim().split(" ").collect();
match op {
"cpy" => match args[0].parse::<i32>() {
Ok(n) => regs.set(args[1], n),
Err(_) => {
let val = regs.get(args[0]);
regs.set(args[1], val);
},
},
"inc" => regs.inc(args[0]),
"dec" => regs.dec(args[0]),
"jnz" => if match args[0].parse::<i32>() {
Ok(n) => n!= 0,
Err(_) => regs.get(args[0])!= 0,
} {
current_ins += args[1].parse::<i32>().unwrap();
continue;
},
_ => (),
}
current_ins += 1;
}
println!("{}", regs.a);
}
| inc | identifier_name |
assembunny.rs | use std::io::prelude::*;
use std::fs::File;
struct Registers {
a: i32,
b: i32,
c: i32,
d: i32
}
impl Registers {
fn new() -> Registers {
Registers{a: 0, b: 0, c: 0, d: 0}
}
fn get(&self, name: &str) -> i32 {
match name {
"a" => self.a,
"b" => self.b,
"c" => self.c,
"d" => self.d,
_ => panic!("invalid register {}!", name),
}
}
fn inc(&mut self, name: &str) {
let curr_val = self.get(name);
self.set(name, curr_val + 1);
}
fn dec(&mut self, name: &str) {
let curr_val = self.get(name);
self.set(name, curr_val - 1);
}
fn set(&mut self, name: &str, val: i32) {
match name {
"a" => self.a = val,
"b" => self.b = val,
"c" => self.c = val,
"d" => self.d = val,
_ => panic!("invalid register {}!", name),
} | let mut data = String::new();
f.read_to_string(&mut data).expect("unable to read file");
let mut instructions: Vec<&str> = data.split("\n").collect();
instructions.pop();
let mut regs = Registers::new();
let mut current_ins: i32 = 0;
loop {
if current_ins < 0 || current_ins as usize >= instructions.len() {
break;
}
let (op, args_str) = instructions[current_ins as usize].split_at(3);
let args: Vec<&str> = args_str.trim().split(" ").collect();
match op {
"cpy" => match args[0].parse::<i32>() {
Ok(n) => regs.set(args[1], n),
Err(_) => {
let val = regs.get(args[0]);
regs.set(args[1], val);
},
},
"inc" => regs.inc(args[0]),
"dec" => regs.dec(args[0]),
"jnz" => if match args[0].parse::<i32>() {
Ok(n) => n!= 0,
Err(_) => regs.get(args[0])!= 0,
} {
current_ins += args[1].parse::<i32>().unwrap();
continue;
},
_ => (),
}
current_ins += 1;
}
println!("{}", regs.a);
} | }
}
fn main() {
let mut f = File::open("data.txt").expect("unable to open file"); | random_line_split |
serde.rs | // Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0.
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
use serde::ser::{Error as SerError, Serialize, SerializeMap, SerializeTuple, Serializer};
use std::collections::BTreeMap;
use std::fmt;
use std::str::FromStr;
use std::string::ToString;
use std::{f64, str};
use super::{Json, JsonRef, JsonType};
use crate::codec::Error;
impl<'a> ToString for JsonRef<'a> {
fn to_string(&self) -> String {
serde_json::to_string(self).unwrap()
}
}
impl<'a> Serialize for JsonRef<'a> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self.get_type() {
JsonType::Literal => match self.get_literal() {
Some(b) => serializer.serialize_bool(b),
None => serializer.serialize_none(),
},
JsonType::String => match self.get_str() {
Ok(s) => serializer.serialize_str(s),
Err(_) => Err(SerError::custom("json contains invalid UTF-8 characters")),
},
JsonType::Double => serializer.serialize_f64(self.get_double()),
JsonType::I64 => serializer.serialize_i64(self.get_i64()),
JsonType::U64 => serializer.serialize_u64(self.get_u64()),
JsonType::Object => {
let elem_count = self.get_elem_count();
let mut map = serializer.serialize_map(Some(elem_count))?;
for i in 0..elem_count {
let key = self.object_get_key(i);
let val = self.object_get_val(i).map_err(SerError::custom)?;
map.serialize_entry(str::from_utf8(key).unwrap(), &val)?;
}
map.end()
}
JsonType::Array => {
let elem_count = self.get_elem_count();
let mut tup = serializer.serialize_tuple(elem_count)?;
for i in 0..elem_count {
let item = self.array_get_elem(i).map_err(SerError::custom)?;
tup.serialize_element(&item)?;
}
tup.end()
}
}
}
}
impl ToString for Json {
fn to_string(&self) -> String {
serde_json::to_string(&self.as_ref()).unwrap()
}
}
impl FromStr for Json {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> |
}
struct JsonVisitor;
impl<'de> Visitor<'de> for JsonVisitor {
type Value = Json;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "a json value")
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::none().map_err(de::Error::custom)?)
}
fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_bool(v).map_err(de::Error::custom)?)
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_i64(v).map_err(de::Error::custom)?)
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
if v > (std::i64::MAX as u64) {
Ok(Json::from_f64(v as f64).map_err(de::Error::custom)?)
} else {
Ok(Json::from_i64(v as i64).map_err(de::Error::custom)?)
}
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_f64(v).map_err(de::Error::custom)?)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_string(String::from(v)).map_err(de::Error::custom)?)
}
fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error>
where
M: SeqAccess<'de>,
{
let size = seq.size_hint().unwrap_or_default();
let mut value = Vec::with_capacity(size);
while let Some(v) = seq.next_element()? {
value.push(v);
}
Ok(Json::from_array(value).map_err(de::Error::custom)?)
}
fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let mut map = BTreeMap::new();
while let Some((key, value)) = access.next_entry()? {
map.insert(key, value);
}
Ok(Json::from_object(map).map_err(de::Error::custom)?)
}
}
impl<'de> Deserialize<'de> for Json {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(JsonVisitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_from_str_for_object() {
let jstr1 = r#"{"a": [1, "2", {"aa": "bb"}, 4.0, null], "c": null,"b": true}"#;
let j1: Json = jstr1.parse().unwrap();
let jstr2 = j1.to_string();
let expect_str = r#"{"a":[1,"2",{"aa":"bb"},4.0,null],"b":true,"c":null}"#;
assert_eq!(jstr2, expect_str);
}
#[test]
fn test_from_str() {
let legal_cases = vec![
(r#"{"key":"value"}"#),
(r#"["d1","d2"]"#),
(r#"-3"#),
(r#"3"#),
(r#"3.0"#),
(r#"null"#),
(r#"true"#),
(r#"false"#),
];
for json_str in legal_cases {
let resp = Json::from_str(json_str);
assert!(resp.is_ok());
}
let cases = vec![
(
r#"9223372036854776000"#,
Json::from_f64(9223372036854776000.0),
),
(
r#"9223372036854775807"#,
Json::from_i64(9223372036854775807),
),
];
for (json_str, json) in cases {
let resp = Json::from_str(json_str);
assert!(resp.is_ok());
assert_eq!(resp.unwrap(), json.unwrap());
}
let illegal_cases = vec!["[pxx,apaa]", "hpeheh", ""];
for json_str in illegal_cases {
let resp = Json::from_str(json_str);
assert!(resp.is_err());
}
}
}
| {
match serde_json::from_str(s) {
Ok(value) => Ok(value),
Err(e) => Err(invalid_type!("Illegal Json text: {:?}", e)),
}
} | identifier_body |
serde.rs | // Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0.
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
use serde::ser::{Error as SerError, Serialize, SerializeMap, SerializeTuple, Serializer};
use std::collections::BTreeMap;
use std::fmt;
use std::str::FromStr;
use std::string::ToString;
use std::{f64, str};
use super::{Json, JsonRef, JsonType};
use crate::codec::Error;
impl<'a> ToString for JsonRef<'a> {
fn to_string(&self) -> String {
serde_json::to_string(self).unwrap()
}
}
impl<'a> Serialize for JsonRef<'a> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self.get_type() {
JsonType::Literal => match self.get_literal() {
Some(b) => serializer.serialize_bool(b),
None => serializer.serialize_none(),
},
JsonType::String => match self.get_str() {
Ok(s) => serializer.serialize_str(s),
Err(_) => Err(SerError::custom("json contains invalid UTF-8 characters")),
},
JsonType::Double => serializer.serialize_f64(self.get_double()),
JsonType::I64 => serializer.serialize_i64(self.get_i64()),
JsonType::U64 => serializer.serialize_u64(self.get_u64()),
JsonType::Object => {
let elem_count = self.get_elem_count();
let mut map = serializer.serialize_map(Some(elem_count))?;
for i in 0..elem_count {
let key = self.object_get_key(i);
let val = self.object_get_val(i).map_err(SerError::custom)?;
map.serialize_entry(str::from_utf8(key).unwrap(), &val)?;
}
map.end()
}
JsonType::Array => {
let elem_count = self.get_elem_count();
let mut tup = serializer.serialize_tuple(elem_count)?;
for i in 0..elem_count {
let item = self.array_get_elem(i).map_err(SerError::custom)?;
tup.serialize_element(&item)?; | }
}
}
impl ToString for Json {
fn to_string(&self) -> String {
serde_json::to_string(&self.as_ref()).unwrap()
}
}
impl FromStr for Json {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match serde_json::from_str(s) {
Ok(value) => Ok(value),
Err(e) => Err(invalid_type!("Illegal Json text: {:?}", e)),
}
}
}
struct JsonVisitor;
impl<'de> Visitor<'de> for JsonVisitor {
type Value = Json;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "a json value")
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::none().map_err(de::Error::custom)?)
}
fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_bool(v).map_err(de::Error::custom)?)
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_i64(v).map_err(de::Error::custom)?)
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
if v > (std::i64::MAX as u64) {
Ok(Json::from_f64(v as f64).map_err(de::Error::custom)?)
} else {
Ok(Json::from_i64(v as i64).map_err(de::Error::custom)?)
}
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_f64(v).map_err(de::Error::custom)?)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_string(String::from(v)).map_err(de::Error::custom)?)
}
fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error>
where
M: SeqAccess<'de>,
{
let size = seq.size_hint().unwrap_or_default();
let mut value = Vec::with_capacity(size);
while let Some(v) = seq.next_element()? {
value.push(v);
}
Ok(Json::from_array(value).map_err(de::Error::custom)?)
}
fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let mut map = BTreeMap::new();
while let Some((key, value)) = access.next_entry()? {
map.insert(key, value);
}
Ok(Json::from_object(map).map_err(de::Error::custom)?)
}
}
impl<'de> Deserialize<'de> for Json {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(JsonVisitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_from_str_for_object() {
let jstr1 = r#"{"a": [1, "2", {"aa": "bb"}, 4.0, null], "c": null,"b": true}"#;
let j1: Json = jstr1.parse().unwrap();
let jstr2 = j1.to_string();
let expect_str = r#"{"a":[1,"2",{"aa":"bb"},4.0,null],"b":true,"c":null}"#;
assert_eq!(jstr2, expect_str);
}
#[test]
fn test_from_str() {
let legal_cases = vec![
(r#"{"key":"value"}"#),
(r#"["d1","d2"]"#),
(r#"-3"#),
(r#"3"#),
(r#"3.0"#),
(r#"null"#),
(r#"true"#),
(r#"false"#),
];
for json_str in legal_cases {
let resp = Json::from_str(json_str);
assert!(resp.is_ok());
}
let cases = vec![
(
r#"9223372036854776000"#,
Json::from_f64(9223372036854776000.0),
),
(
r#"9223372036854775807"#,
Json::from_i64(9223372036854775807),
),
];
for (json_str, json) in cases {
let resp = Json::from_str(json_str);
assert!(resp.is_ok());
assert_eq!(resp.unwrap(), json.unwrap());
}
let illegal_cases = vec!["[pxx,apaa]", "hpeheh", ""];
for json_str in illegal_cases {
let resp = Json::from_str(json_str);
assert!(resp.is_err());
}
}
} | }
tup.end()
} | random_line_split |
serde.rs | // Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0.
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
use serde::ser::{Error as SerError, Serialize, SerializeMap, SerializeTuple, Serializer};
use std::collections::BTreeMap;
use std::fmt;
use std::str::FromStr;
use std::string::ToString;
use std::{f64, str};
use super::{Json, JsonRef, JsonType};
use crate::codec::Error;
impl<'a> ToString for JsonRef<'a> {
fn to_string(&self) -> String {
serde_json::to_string(self).unwrap()
}
}
impl<'a> Serialize for JsonRef<'a> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self.get_type() {
JsonType::Literal => match self.get_literal() {
Some(b) => serializer.serialize_bool(b),
None => serializer.serialize_none(),
},
JsonType::String => match self.get_str() {
Ok(s) => serializer.serialize_str(s),
Err(_) => Err(SerError::custom("json contains invalid UTF-8 characters")),
},
JsonType::Double => serializer.serialize_f64(self.get_double()),
JsonType::I64 => serializer.serialize_i64(self.get_i64()),
JsonType::U64 => serializer.serialize_u64(self.get_u64()),
JsonType::Object => {
let elem_count = self.get_elem_count();
let mut map = serializer.serialize_map(Some(elem_count))?;
for i in 0..elem_count {
let key = self.object_get_key(i);
let val = self.object_get_val(i).map_err(SerError::custom)?;
map.serialize_entry(str::from_utf8(key).unwrap(), &val)?;
}
map.end()
}
JsonType::Array => {
let elem_count = self.get_elem_count();
let mut tup = serializer.serialize_tuple(elem_count)?;
for i in 0..elem_count {
let item = self.array_get_elem(i).map_err(SerError::custom)?;
tup.serialize_element(&item)?;
}
tup.end()
}
}
}
}
impl ToString for Json {
fn to_string(&self) -> String {
serde_json::to_string(&self.as_ref()).unwrap()
}
}
impl FromStr for Json {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match serde_json::from_str(s) {
Ok(value) => Ok(value),
Err(e) => Err(invalid_type!("Illegal Json text: {:?}", e)),
}
}
}
struct JsonVisitor;
impl<'de> Visitor<'de> for JsonVisitor {
type Value = Json;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "a json value")
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::none().map_err(de::Error::custom)?)
}
fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_bool(v).map_err(de::Error::custom)?)
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_i64(v).map_err(de::Error::custom)?)
}
fn | <E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
if v > (std::i64::MAX as u64) {
Ok(Json::from_f64(v as f64).map_err(de::Error::custom)?)
} else {
Ok(Json::from_i64(v as i64).map_err(de::Error::custom)?)
}
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_f64(v).map_err(de::Error::custom)?)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Json::from_string(String::from(v)).map_err(de::Error::custom)?)
}
fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error>
where
M: SeqAccess<'de>,
{
let size = seq.size_hint().unwrap_or_default();
let mut value = Vec::with_capacity(size);
while let Some(v) = seq.next_element()? {
value.push(v);
}
Ok(Json::from_array(value).map_err(de::Error::custom)?)
}
fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let mut map = BTreeMap::new();
while let Some((key, value)) = access.next_entry()? {
map.insert(key, value);
}
Ok(Json::from_object(map).map_err(de::Error::custom)?)
}
}
impl<'de> Deserialize<'de> for Json {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(JsonVisitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_from_str_for_object() {
let jstr1 = r#"{"a": [1, "2", {"aa": "bb"}, 4.0, null], "c": null,"b": true}"#;
let j1: Json = jstr1.parse().unwrap();
let jstr2 = j1.to_string();
let expect_str = r#"{"a":[1,"2",{"aa":"bb"},4.0,null],"b":true,"c":null}"#;
assert_eq!(jstr2, expect_str);
}
#[test]
fn test_from_str() {
let legal_cases = vec![
(r#"{"key":"value"}"#),
(r#"["d1","d2"]"#),
(r#"-3"#),
(r#"3"#),
(r#"3.0"#),
(r#"null"#),
(r#"true"#),
(r#"false"#),
];
for json_str in legal_cases {
let resp = Json::from_str(json_str);
assert!(resp.is_ok());
}
let cases = vec![
(
r#"9223372036854776000"#,
Json::from_f64(9223372036854776000.0),
),
(
r#"9223372036854775807"#,
Json::from_i64(9223372036854775807),
),
];
for (json_str, json) in cases {
let resp = Json::from_str(json_str);
assert!(resp.is_ok());
assert_eq!(resp.unwrap(), json.unwrap());
}
let illegal_cases = vec!["[pxx,apaa]", "hpeheh", ""];
for json_str in illegal_cases {
let resp = Json::from_str(json_str);
assert!(resp.is_err());
}
}
}
| visit_u64 | identifier_name |
list.rs |
use super::{print_size, io, Header, Operation, App, Arg, ArgMatches, SubCommand, PathBuf, Regex,
RegexFault};
fn valid_path(x: String) -> Result<(), String> {
let p = PathBuf::from(&x);
match (p.exists(), p.is_file()) {
(true, true) => Ok(()),
(false, _) => Err(format!("Cannot process {} it does not exist", &x)),
(true, false) => Err(format!("Cannot process {} it is a directory or link", &x)),
}
}
fn valid_regex(x: String) -> Result<(), String> {
match Regex::new(&x) {
Ok(_) => Ok(()),
Err(RegexFault::CompiledTooBig(val)) => Err(format!(
"Input regex is too large. Set size limit {:?}",
val
)),
Err(RegexFault::Syntax(s)) => Err(format!("Regex Syntax Error: {}", s)),
Err(_) => Err(format!("Regex Syntax Error. Source undocumented :(")),
}
}
pub fn build<'a>() -> App<'static, 'a> {
SubCommand::with_name("list")
.about("lists contents of a regex")
.arg(
Arg::with_name("group")
.long("groupname")
.takes_value(false)
.next_line_help(true)
.help("display group name"),
)
.arg(
Arg::with_name("user")
.long("username")
.takes_value(false)
.next_line_help(true)
.help("display username"),
)
.arg(
Arg::with_name("uid")
.long("uid")
.takes_value(false)
.next_line_help(true)
.help("display uid"),
)
.arg(
Arg::with_name("gid")
.long("gid")
.takes_value(false)
.next_line_help(true)
.help("display gid"),
)
.arg(
Arg::with_name("size")
.long("size")
.takes_value(false)
.next_line_help(true)
.help("display file size"),
)
.arg(
Arg::with_name("file")
.short("f")
.long("file")
.takes_value(true)
.multiple(false)
.value_name("INFILE")
.required(true)
.validator(valid_path)
.next_line_help(true)
.help("file to read"),
)
.arg(
Arg::with_name("regex")
.short("r")
.long("regex")
.takes_value(true)
.multiple(false)
.value_name("REGEX")
.validator(valid_regex)
.next_line_help(true)
.help("regex to filter list by"),
)
}
/// print data
pub fn exec(
header: &Header,
regex: &Option<Regex>,
group: bool,
user: bool,
gid: bool,
uid: bool,
size: bool,
) -> io::Result<()> {
let flag = match regex {
&Option::None => true,
&Option::Some(ref regex) => {
let path = header.path()?;
match path.file_name() {
Option::None => false,
Option::Some(f_name) => {
match f_name.to_str() {
Option::None => false,
Option::Some(f_name_str) => regex.is_match(f_name_str),
}
}
}
}
};
if flag {
if group {
let g = match header.groupname() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No groupname",
Err(_) => "UTF8 ERROR",
};
println!("\tGroup Name: {}", g);
}
let path = header.path()?;
println!("{:?}", path);
if user {
let u = match header.username() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No username",
Err(_) => "UTF8 ERROR",
};
println!("\tUser Name: {}", u);
}
if gid {
println!("\tUser Group ID (gid): 0x{:X}", header.gid()?);
}
if uid {
println!("\tUser ID (uid): 0x{:X}", header.uid()?);
}
if size {
println!("\tSize: {}", print_size(header.size()?));
}
}
Ok(())
}
pub fn | (x: &ArgMatches) -> Operation {
Operation::List(
PathBuf::from(x.value_of("file").unwrap()),
match x.value_of("regex") {
Option::None => None,
Option::Some(r) => Regex::new(&r).ok(),
},
x.is_present("group"),
x.is_present("user"),
x.is_present("gid"),
x.is_present("uid"),
x.is_present("mtime"),
x.is_present("size"),
)
}
| get | identifier_name |
list.rs |
use super::{print_size, io, Header, Operation, App, Arg, ArgMatches, SubCommand, PathBuf, Regex,
RegexFault};
fn valid_path(x: String) -> Result<(), String> {
let p = PathBuf::from(&x);
match (p.exists(), p.is_file()) {
(true, true) => Ok(()),
(false, _) => Err(format!("Cannot process {} it does not exist", &x)),
(true, false) => Err(format!("Cannot process {} it is a directory or link", &x)),
}
}
fn valid_regex(x: String) -> Result<(), String> {
match Regex::new(&x) {
Ok(_) => Ok(()),
Err(RegexFault::CompiledTooBig(val)) => Err(format!(
"Input regex is too large. Set size limit {:?}",
val
)),
Err(RegexFault::Syntax(s)) => Err(format!("Regex Syntax Error: {}", s)),
Err(_) => Err(format!("Regex Syntax Error. Source undocumented :(")),
}
}
pub fn build<'a>() -> App<'static, 'a> | .takes_value(false)
.next_line_help(true)
.help("display uid"),
)
.arg(
Arg::with_name("gid")
.long("gid")
.takes_value(false)
.next_line_help(true)
.help("display gid"),
)
.arg(
Arg::with_name("size")
.long("size")
.takes_value(false)
.next_line_help(true)
.help("display file size"),
)
.arg(
Arg::with_name("file")
.short("f")
.long("file")
.takes_value(true)
.multiple(false)
.value_name("INFILE")
.required(true)
.validator(valid_path)
.next_line_help(true)
.help("file to read"),
)
.arg(
Arg::with_name("regex")
.short("r")
.long("regex")
.takes_value(true)
.multiple(false)
.value_name("REGEX")
.validator(valid_regex)
.next_line_help(true)
.help("regex to filter list by"),
)
}
/// print data
pub fn exec(
header: &Header,
regex: &Option<Regex>,
group: bool,
user: bool,
gid: bool,
uid: bool,
size: bool,
) -> io::Result<()> {
let flag = match regex {
&Option::None => true,
&Option::Some(ref regex) => {
let path = header.path()?;
match path.file_name() {
Option::None => false,
Option::Some(f_name) => {
match f_name.to_str() {
Option::None => false,
Option::Some(f_name_str) => regex.is_match(f_name_str),
}
}
}
}
};
if flag {
if group {
let g = match header.groupname() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No groupname",
Err(_) => "UTF8 ERROR",
};
println!("\tGroup Name: {}", g);
}
let path = header.path()?;
println!("{:?}", path);
if user {
let u = match header.username() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No username",
Err(_) => "UTF8 ERROR",
};
println!("\tUser Name: {}", u);
}
if gid {
println!("\tUser Group ID (gid): 0x{:X}", header.gid()?);
}
if uid {
println!("\tUser ID (uid): 0x{:X}", header.uid()?);
}
if size {
println!("\tSize: {}", print_size(header.size()?));
}
}
Ok(())
}
pub fn get(x: &ArgMatches) -> Operation {
Operation::List(
PathBuf::from(x.value_of("file").unwrap()),
match x.value_of("regex") {
Option::None => None,
Option::Some(r) => Regex::new(&r).ok(),
},
x.is_present("group"),
x.is_present("user"),
x.is_present("gid"),
x.is_present("uid"),
x.is_present("mtime"),
x.is_present("size"),
)
}
| {
SubCommand::with_name("list")
.about("lists contents of a regex")
.arg(
Arg::with_name("group")
.long("groupname")
.takes_value(false)
.next_line_help(true)
.help("display group name"),
)
.arg(
Arg::with_name("user")
.long("username")
.takes_value(false)
.next_line_help(true)
.help("display username"),
)
.arg(
Arg::with_name("uid")
.long("uid") | identifier_body |
list.rs | use super::{print_size, io, Header, Operation, App, Arg, ArgMatches, SubCommand, PathBuf, Regex,
RegexFault};
fn valid_path(x: String) -> Result<(), String> {
let p = PathBuf::from(&x);
match (p.exists(), p.is_file()) {
(true, true) => Ok(()),
(false, _) => Err(format!("Cannot process {} it does not exist", &x)),
(true, false) => Err(format!("Cannot process {} it is a directory or link", &x)),
}
}
fn valid_regex(x: String) -> Result<(), String> {
match Regex::new(&x) {
Ok(_) => Ok(()),
Err(RegexFault::CompiledTooBig(val)) => Err(format!(
"Input regex is too large. Set size limit {:?}",
val
)),
Err(RegexFault::Syntax(s)) => Err(format!("Regex Syntax Error: {}", s)),
Err(_) => Err(format!("Regex Syntax Error. Source undocumented :(")),
}
}
pub fn build<'a>() -> App<'static, 'a> {
SubCommand::with_name("list")
.about("lists contents of a regex")
.arg(
Arg::with_name("group")
.long("groupname")
.takes_value(false)
.next_line_help(true)
.help("display group name"),
)
.arg(
Arg::with_name("user")
.long("username")
.takes_value(false)
.next_line_help(true)
.help("display username"),
)
.arg(
Arg::with_name("uid")
.long("uid")
.takes_value(false)
.next_line_help(true)
.help("display uid"),
)
.arg(
Arg::with_name("gid")
.long("gid")
.takes_value(false)
.next_line_help(true)
.help("display gid"),
)
.arg(
Arg::with_name("size")
.long("size")
.takes_value(false)
.next_line_help(true)
.help("display file size"),
)
.arg(
Arg::with_name("file")
.short("f")
.long("file")
.takes_value(true)
.multiple(false)
.value_name("INFILE")
.required(true)
.validator(valid_path)
.next_line_help(true)
.help("file to read"),
)
.arg( | .takes_value(true)
.multiple(false)
.value_name("REGEX")
.validator(valid_regex)
.next_line_help(true)
.help("regex to filter list by"),
)
}
/// print data
pub fn exec(
header: &Header,
regex: &Option<Regex>,
group: bool,
user: bool,
gid: bool,
uid: bool,
size: bool,
) -> io::Result<()> {
let flag = match regex {
&Option::None => true,
&Option::Some(ref regex) => {
let path = header.path()?;
match path.file_name() {
Option::None => false,
Option::Some(f_name) => {
match f_name.to_str() {
Option::None => false,
Option::Some(f_name_str) => regex.is_match(f_name_str),
}
}
}
}
};
if flag {
if group {
let g = match header.groupname() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No groupname",
Err(_) => "UTF8 ERROR",
};
println!("\tGroup Name: {}", g);
}
let path = header.path()?;
println!("{:?}", path);
if user {
let u = match header.username() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No username",
Err(_) => "UTF8 ERROR",
};
println!("\tUser Name: {}", u);
}
if gid {
println!("\tUser Group ID (gid): 0x{:X}", header.gid()?);
}
if uid {
println!("\tUser ID (uid): 0x{:X}", header.uid()?);
}
if size {
println!("\tSize: {}", print_size(header.size()?));
}
}
Ok(())
}
pub fn get(x: &ArgMatches) -> Operation {
Operation::List(
PathBuf::from(x.value_of("file").unwrap()),
match x.value_of("regex") {
Option::None => None,
Option::Some(r) => Regex::new(&r).ok(),
},
x.is_present("group"),
x.is_present("user"),
x.is_present("gid"),
x.is_present("uid"),
x.is_present("mtime"),
x.is_present("size"),
)
} | Arg::with_name("regex")
.short("r")
.long("regex") | random_line_split |
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn foo(x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
}
fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() | if cnt == 0 {
break x;
} else {
cnt -= 1;
}
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
}
| {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo(); | identifier_body |
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn foo(x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
} | fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo();
if cnt == 0 {
break x;
} else {
cnt -= 1;
}
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
} | random_line_split |
|
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn foo(x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
}
fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo();
if cnt == 0 {
break x;
} else |
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
}
| {
cnt -= 1;
} | conditional_block |
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn | (x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
}
fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo();
if cnt == 0 {
break x;
} else {
cnt -= 1;
}
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
}
| foo | identifier_name |
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response;
pub mod client;
// Registrated port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
/// SSL contexyt to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
/// Enable UTF8 mailboxes in enveloppe or headers
smtp_utf8: bool,
/// List of authentication mechanism, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
None => Err(From::from("Could nor resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder {
self.security_level = level;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()``
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn | (mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if!$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if!self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if!found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) {
self.reset();
}
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
}
| connection_reuse | identifier_name |
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response;
pub mod client;
// Registrated port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
/// SSL contexyt to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
/// Enable UTF8 mailboxes in enveloppe or headers
smtp_utf8: bool,
/// List of authentication mechanism, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
None => Err(From::from("Could nor resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder |
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()``
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn connection_reuse(mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if!$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if!self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if!found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) {
self.reset();
}
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
}
| {
self.security_level = level;
self
} | identifier_body |
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response; | pub mod client;
// Registrated port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
/// SSL contexyt to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
/// Enable UTF8 mailboxes in enveloppe or headers
smtp_utf8: bool,
/// List of authentication mechanism, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
None => Err(From::from("Could nor resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder {
self.security_level = level;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()``
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn connection_reuse(mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if!$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if!self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if!found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) {
self.reset();
}
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
} | random_line_split |
|
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response;
pub mod client;
// Registrated port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
/// SSL contexyt to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
/// Enable UTF8 mailboxes in enveloppe or headers
smtp_utf8: bool,
/// List of authentication mechanism, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
None => Err(From::from("Could nor resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder {
self.security_level = level;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()``
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn connection_reuse(mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if!$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
/// It does not connects to the server, but only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if!self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if!found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) |
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
}
| {
self.reset();
} | conditional_block |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right);
let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else { key2 };
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region!= ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
fn must_not_stale_read(
cluster: &mut Cluster<NodeCluster>,
stale_key: &[u8],
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
fp: &str,
) {
// A new value for stale_key.
let v3 = b"v3";
let mut request = new_request(
new_region.get_id(),
new_region.get_region_epoch().clone(),
vec![new_put_cf_cmd("default", stale_key, v3)],
false,
);
request.mut_header().set_peer(new_leader.clone());
cluster
.call_command_on_node(new_leader.get_store_id(), request, Duration::from_secs(5))
.unwrap();
// LocalRead.
let read_quorum = false;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// ReadIndex.
let read_quorum = true;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// Leaders can always propose read index despite split/merge.
let propose_readindex = "before_propose_readindex";
fail::cfg(propose_readindex, "return(true)").unwrap();
// Can not execute reads that are queued.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
value1.unwrap_err(); // Error::Timeout
// Remove the fp.
fail::remove(fp);
// It should read an error instead of timeout.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(5),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
assert!(value1.unwrap().get_header().has_error());
// Clean up.
fail::remove(propose_readindex);
}
fn | (
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id()!= leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleeps an election timeout. The new leader needs enough time to gather
// all followers progress, in cause the merge request is reject by the
// log gap too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer is not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We can not use `get_region_with` to get the latest info of reigon 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region!= ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case running reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to transfer leader with filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 get the latest writes by message append
// instead of snapshot, so that transfer leader to peer 3 can 100% success.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contains an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap();
pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try writing k2 to peer3
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for raftstore receives the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
}
/// In previous implementation, we suspect the leader lease at the position of `leader_commit_prepare_merge`
/// failpoint when `PrepareMerge` log is committed, which is too late to prevent stale read.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In previous implementation, the result is ok and the value is "v"
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
}
| must_not_eq_on_key | identifier_name |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right);
let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else | ;
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region!= ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
/// Asserts that `old_leader` cannot serve a stale read of `stale_key` after
/// the region boundary has changed (by split or merge).
///
/// Precondition: the failpoint `fp` currently pauses the apply worker on the
/// old leader's store, so the old leader has not applied the boundary change
/// yet, while `new_leader` already serves `stale_key` in `new_region`.
fn must_not_stale_read(
    cluster: &mut Cluster<NodeCluster>,
    stale_key: &[u8],
    old_region: &Region,
    old_leader: &Peer,
    new_region: &Region,
    new_leader: &Peer,
    fp: &str,
) {
    // Write a fresh value for `stale_key` through the new leader.
    let fresh_value = b"v3";
    let mut put = new_request(
        new_region.get_id(),
        new_region.get_region_epoch().clone(),
        vec![new_put_cf_cmd("default", stale_key, fresh_value)],
        false,
    );
    put.mut_header().set_peer(new_leader.clone());
    cluster
        .call_command_on_node(new_leader.get_store_id(), put, Duration::from_secs(5))
        .unwrap();

    // Check both read paths: LocalRead (quorum = false) and ReadIndex
    // (quorum = true). In each mode the new leader must return the fresh
    // value and the old leader must return an error.
    for &read_quorum in &[false, true] {
        must_not_eq_on_key(
            cluster,
            stale_key,
            fresh_value,
            read_quorum,
            old_region,
            old_leader,
            new_region,
            new_leader,
        );
    }

    // Leaders can always propose read index despite split/merge.
    let propose_readindex = "before_propose_readindex";
    fail::cfg(propose_readindex, "return(true)").unwrap();

    // A read queued behind the paused apply worker cannot execute, so the
    // request must time out.
    let queued = read_on_peer(
        cluster,
        old_leader.clone(),
        old_region.clone(),
        stale_key,
        true, // read quorum
        Duration::from_secs(1),
    );
    debug!("stale_key: {:?}, {:?}", stale_key, queued);
    queued.unwrap_err(); // Error::Timeout

    // Resume the apply worker.
    fail::remove(fp);

    // Now the old leader must answer quickly with an error, not a timeout.
    let resumed = read_on_peer(
        cluster,
        old_leader.clone(),
        old_region.clone(),
        stale_key,
        true, // read quorum
        Duration::from_secs(5),
    );
    debug!("stale_key: {:?}, {:?}", stale_key, resumed);
    assert!(resumed.unwrap().get_header().has_error());

    // Clean up.
    fail::remove(propose_readindex);
}
fn must_not_eq_on_key(
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id()!= leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleeps an election timeout. The new leader needs enough time to gather
// all followers progress, in cause the merge request is reject by the
// log gap too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer is not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We can not use `get_region_with` to get the latest info of reigon 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region!= ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case running reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to transfer leader with filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 get the latest writes by message append
// instead of snapshot, so that transfer leader to peer 3 can 100% success.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contains an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap();
pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try writing k2 to peer3
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for raftstore receives the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
}
/// In previous implementation, we suspect the leader lease at the position of `leader_commit_prepare_merge`
/// failpoint when `PrepareMerge` log is committed, which is too late to prevent stale read.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In previous implementation, the result is ok and the value is "v"
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
}
| { key2 } | conditional_block |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right); | .get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else { key2 };
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region!= ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
fn must_not_stale_read(
cluster: &mut Cluster<NodeCluster>,
stale_key: &[u8],
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
fp: &str,
) {
// A new value for stale_key.
let v3 = b"v3";
let mut request = new_request(
new_region.get_id(),
new_region.get_region_epoch().clone(),
vec![new_put_cf_cmd("default", stale_key, v3)],
false,
);
request.mut_header().set_peer(new_leader.clone());
cluster
.call_command_on_node(new_leader.get_store_id(), request, Duration::from_secs(5))
.unwrap();
// LocalRead.
let read_quorum = false;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// ReadIndex.
let read_quorum = true;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// Leaders can always propose read index despite split/merge.
let propose_readindex = "before_propose_readindex";
fail::cfg(propose_readindex, "return(true)").unwrap();
// Can not execute reads that are queued.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
value1.unwrap_err(); // Error::Timeout
// Remove the fp.
fail::remove(fp);
// It should read an error instead of timeout.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(5),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
assert!(value1.unwrap().get_header().has_error());
// Clean up.
fail::remove(propose_readindex);
}
fn must_not_eq_on_key(
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id()!= leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleeps an election timeout. The new leader needs enough time to gather
// all followers progress, in cause the merge request is reject by the
// log gap too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer is not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We can not use `get_region_with` to get the latest info of reigon 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region!= ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case running reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to transfer leader with filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 get the latest writes by message append
// instead of snapshot, so that transfer leader to peer 3 can 100% success.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contains an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap();
pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try writing k2 to peer3
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for raftstore receives the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
}
/// In previous implementation, we suspect the leader lease at the position of `leader_commit_prepare_merge`
/// failpoint when `PrepareMerge` log is committed, which is too late to prevent stale read.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In previous implementation, the result is ok and the value is "v"
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
} | let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1 | random_line_split |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right);
let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else { key2 };
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region!= ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
fn must_not_stale_read(
cluster: &mut Cluster<NodeCluster>,
stale_key: &[u8],
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
fp: &str,
) {
// A new value for stale_key.
let v3 = b"v3";
let mut request = new_request(
new_region.get_id(),
new_region.get_region_epoch().clone(),
vec![new_put_cf_cmd("default", stale_key, v3)],
false,
);
request.mut_header().set_peer(new_leader.clone());
cluster
.call_command_on_node(new_leader.get_store_id(), request, Duration::from_secs(5))
.unwrap();
// LocalRead.
let read_quorum = false;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// ReadIndex.
let read_quorum = true;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// Leaders can always propose read index despite split/merge.
let propose_readindex = "before_propose_readindex";
fail::cfg(propose_readindex, "return(true)").unwrap();
// Can not execute reads that are queued.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
value1.unwrap_err(); // Error::Timeout
// Remove the fp.
fail::remove(fp);
// It should read an error instead of timeout.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(5),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
assert!(value1.unwrap().get_header().has_error());
// Clean up.
fail::remove(propose_readindex);
}
fn must_not_eq_on_key(
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id()!= leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleeps an election timeout. The new leader needs enough time to gather
// all followers progress, in cause the merge request is reject by the
// log gap too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elected a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer is not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We can not use `get_region_with` to get the latest info of reigon 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region!= ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case running reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to transfer leader with filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 get the latest writes by message append
// instead of snapshot, so that transfer leader to peer 3 can 100% success.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contains an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() | pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try writing k2 to peer3
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for raftstore receives the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
}
/// In previous implementation, we suspect the leader lease at the position of `leader_commit_prepare_merge`
/// failpoint when `PrepareMerge` log is committed, which is too late to prevent stale read.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In previous implementation, the result is ok and the value is "v"
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
}
| {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap(); | identifier_body |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate cal
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Aditional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize,
/// Amount of additional gas to pay when SUICIDE credits a non-existant account
pub suicide_to_new_account_cost: usize,
/// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else {10},
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
}
}
#[test]
#[cfg(test)]
fn | () {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
}
| schedule_evm_assumptions | identifier_name |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate cal
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Aditional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize,
/// Amount of additional gas to pay when SUICIDE credits a non-existant account
pub suicide_to_new_account_cost: usize,
/// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else | ,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
}
}
#[test]
#[cfg(test)]
fn schedule_evm_assumptions() {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
}
| {10} | conditional_block |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate cal
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Aditional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize, | /// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else {10},
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
}
}
#[test]
#[cfg(test)]
fn schedule_evm_assumptions() {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
} | /// Amount of additional gas to pay when SUICIDE credits a non-existant account
pub suicide_to_new_account_cost: usize, | random_line_split |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate cal
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Aditional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize,
/// Amount of additional gas to pay when SUICIDE credits a non-existant account
pub suicide_to_new_account_cost: usize,
/// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else {10},
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule | call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
}
}
#[test]
#[cfg(test)]
fn schedule_evm_assumptions() {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
}
| {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000, | identifier_body |
lib.rs | #[derive(Debug)]
pub struct Rectangle {
length: u32,
width: u32,
}
impl Rectangle {
pub fn can_hold(&self, other: &Rectangle) -> bool {
self.length > other.length && self.width > other.width
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(larger.can_hold(&smaller));
}
#[test]
fn smaller_can_hold_larger() {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(!smaller.can_hold(&larger));
}
}
| larger_can_hold_smaller | identifier_name |
lib.rs | #[derive(Debug)]
pub struct Rectangle {
length: u32,
width: u32,
}
impl Rectangle {
pub fn can_hold(&self, other: &Rectangle) -> bool {
self.length > other.length && self.width > other.width
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn larger_can_hold_smaller() {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(larger.can_hold(&smaller));
}
#[test]
fn smaller_can_hold_larger() {
let larger = Rectangle {
length: 8, | width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(!smaller.can_hold(&larger));
}
} | random_line_split |
|
test.rs | extern crate couch;
extern crate http;
extern crate serialize;
#[cfg(test)]
mod test {
use couch::{Server,Document};
#[deriving(Encodable,Decodable)]
struct TestDocument {
_id: String,
body: String
}
impl Document for TestDocument {
fn id(&self) -> String |
}
#[test]
fn speak_to_the_couch() {
let server = Server::new(String::from_str("http://localhost:5984"));
let info = server.info();
assert_eq!(info.message(), "Welcome".to_owned());
}
#[test]
fn create_database() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("created_by_couch".to_owned());
let database = server.create_database("created_by_couch".to_owned());
assert_eq!(database.name(), "created_by_couch".to_owned());
}
#[test]
fn create_document() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("create_doc".to_owned());
let mut database = server.create_database("create_doc".to_owned());
let test_doc = &TestDocument { _id: "test".to_owned(), body: "test".to_owned() };
database.put(test_doc);
}
} | {
self._id.clone()
} | identifier_body |
test.rs | extern crate couch;
extern crate http;
extern crate serialize;
#[cfg(test)]
mod test {
use couch::{Server,Document};
#[deriving(Encodable,Decodable)]
struct TestDocument {
_id: String,
body: String
}
impl Document for TestDocument {
fn id(&self) -> String {
self._id.clone()
}
}
#[test]
fn speak_to_the_couch() {
let server = Server::new(String::from_str("http://localhost:5984"));
let info = server.info();
assert_eq!(info.message(), "Welcome".to_owned());
}
#[test]
fn | () {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("created_by_couch".to_owned());
let database = server.create_database("created_by_couch".to_owned());
assert_eq!(database.name(), "created_by_couch".to_owned());
}
#[test]
fn create_document() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("create_doc".to_owned());
let mut database = server.create_database("create_doc".to_owned());
let test_doc = &TestDocument { _id: "test".to_owned(), body: "test".to_owned() };
database.put(test_doc);
}
} | create_database | identifier_name |
test.rs | extern crate couch;
extern crate http;
extern crate serialize;
|
#[deriving(Encodable,Decodable)]
struct TestDocument {
_id: String,
body: String
}
impl Document for TestDocument {
fn id(&self) -> String {
self._id.clone()
}
}
#[test]
fn speak_to_the_couch() {
let server = Server::new(String::from_str("http://localhost:5984"));
let info = server.info();
assert_eq!(info.message(), "Welcome".to_owned());
}
#[test]
fn create_database() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("created_by_couch".to_owned());
let database = server.create_database("created_by_couch".to_owned());
assert_eq!(database.name(), "created_by_couch".to_owned());
}
#[test]
fn create_document() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("create_doc".to_owned());
let mut database = server.create_database("create_doc".to_owned());
let test_doc = &TestDocument { _id: "test".to_owned(), body: "test".to_owned() };
database.put(test_doc);
}
} | #[cfg(test)]
mod test {
use couch::{Server,Document}; | random_line_split |
objpool.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::slice::{Iter, IterMut};
use serde_derive::{Deserialize, Serialize};
use slog;
#[derive(Debug, Serialize, Deserialize)]
pub struct ObjPoolIndex<T> {
i: usize,
type_marker: PhantomData<T>
}
impl<T> Copy for ObjPoolIndex<T> { }
impl<T> Clone for ObjPoolIndex<T> {
fn clone(&self) -> ObjPoolIndex<T> {
*self
}
}
impl<T> PartialEq for ObjPoolIndex<T> {
fn eq(&self, other: &ObjPoolIndex<T>) -> bool {
self.i == other.i
}
}
impl<T> Eq for ObjPoolIndex<T> { }
impl<T> Hash for ObjPoolIndex<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.i.hash(state);
}
}
impl<T> ObjPoolIndex<T> {
pub fn get_raw_i(&self) -> usize {
self.i
}
}
impl<T> slog::Value for ObjPoolIndex<T> {
fn serialize(&self, _record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer) -> slog::Result {
serializer.emit_usize(key, self.i)
}
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub struct ObjPool<T> {
storage: Vec<T>
}
impl<T> ObjPool<T> {
pub fn new() -> ObjPool<T> {
ObjPool {storage: Vec::new()}
}
pub fn insert(&mut self, item: T) -> ObjPoolIndex<T> {
let i = self.storage.len();
self.storage.push(item);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
pub fn get(&self, i: ObjPoolIndex<T>) -> &T {
&self.storage[i.i]
}
pub fn get_mut(&mut self, i: ObjPoolIndex<T>) -> &mut T {
&mut self.storage[i.i]
}
pub fn iter_idx(&self) -> ObjPoolIdxIterator<T> {
ObjPoolIdxIterator {
pool: self,
current_idx: 0,
}
}
pub fn iter(&self) -> Iter<T> {
self.storage.iter()
}
pub fn iter_mut(&mut self) -> IterMut<T> {
self.storage.iter_mut()
}
pub fn iter_mut_idx(&mut self) -> ObjPoolMutIdxIterator<T> {
ObjPoolMutIdxIterator {
inner_iter: self.storage.iter_mut(),
current_idx: 0,
}
}
pub fn len(&self) -> usize {
self.storage.len()
}
}
impl<T: Default> ObjPool<T> {
pub fn alloc(&mut self) -> ObjPoolIndex<T> {
let i = self.storage.len();
let o = T::default();
self.storage.push(o);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
}
pub struct ObjPoolIdxIterator<'a, T: 'a> {
pool: &'a ObjPool<T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolIdxIterator<'a, T> {
type Item = ObjPoolIndex<T>;
fn next(&mut self) -> Option<ObjPoolIndex<T>> |
}
pub struct ObjPoolMutIdxIterator<'a, T: 'a> {
inner_iter: IterMut<'a, T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolMutIdxIterator<'a, T> {
type Item = (ObjPoolIndex<T>, &'a mut T);
fn next(&mut self) -> Option<Self::Item> {
let next = self.inner_iter.next();
match next {
None => None,
Some(x) => {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some((ret, x))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Default)]
struct ObjPoolTestObject {
foo: u32
}
#[test]
fn objpool_basic_works() {
let mut pool = ObjPool::<ObjPoolTestObject>::new();
let x = pool.alloc();
let y = pool.alloc();
{
let o = pool.get_mut(x);
o.foo = 123;
}
{
let o = pool.get_mut(y);
o.foo = 456;
}
let ox = pool.get(x);
let oy = pool.get(y);
assert_eq!(ox.foo, 123);
assert_eq!(oy.foo, 456);
}
}
| {
if self.current_idx == self.pool.storage.len() {
None
} else {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some(ret)
}
} | identifier_body |
objpool.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::slice::{Iter, IterMut};
use serde_derive::{Deserialize, Serialize};
use slog;
#[derive(Debug, Serialize, Deserialize)]
pub struct ObjPoolIndex<T> {
i: usize,
type_marker: PhantomData<T>
}
impl<T> Copy for ObjPoolIndex<T> { }
impl<T> Clone for ObjPoolIndex<T> {
fn clone(&self) -> ObjPoolIndex<T> {
*self
}
}
impl<T> PartialEq for ObjPoolIndex<T> {
fn eq(&self, other: &ObjPoolIndex<T>) -> bool {
self.i == other.i
}
}
impl<T> Eq for ObjPoolIndex<T> { }
impl<T> Hash for ObjPoolIndex<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.i.hash(state);
}
}
impl<T> ObjPoolIndex<T> {
pub fn get_raw_i(&self) -> usize {
self.i
}
}
impl<T> slog::Value for ObjPoolIndex<T> {
fn serialize(&self, _record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer) -> slog::Result {
serializer.emit_usize(key, self.i)
}
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub struct ObjPool<T> {
storage: Vec<T>
}
impl<T> ObjPool<T> {
pub fn new() -> ObjPool<T> {
ObjPool {storage: Vec::new()}
}
pub fn insert(&mut self, item: T) -> ObjPoolIndex<T> {
let i = self.storage.len();
self.storage.push(item);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
pub fn get(&self, i: ObjPoolIndex<T>) -> &T {
&self.storage[i.i]
}
pub fn get_mut(&mut self, i: ObjPoolIndex<T>) -> &mut T {
&mut self.storage[i.i]
}
pub fn iter_idx(&self) -> ObjPoolIdxIterator<T> {
ObjPoolIdxIterator {
pool: self,
current_idx: 0,
}
}
pub fn iter(&self) -> Iter<T> {
self.storage.iter()
}
pub fn iter_mut(&mut self) -> IterMut<T> {
self.storage.iter_mut()
}
pub fn iter_mut_idx(&mut self) -> ObjPoolMutIdxIterator<T> {
ObjPoolMutIdxIterator {
inner_iter: self.storage.iter_mut(),
current_idx: 0,
}
}
pub fn len(&self) -> usize {
self.storage.len()
}
}
impl<T: Default> ObjPool<T> {
pub fn alloc(&mut self) -> ObjPoolIndex<T> {
let i = self.storage.len();
let o = T::default();
self.storage.push(o);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
}
pub struct ObjPoolIdxIterator<'a, T: 'a> {
pool: &'a ObjPool<T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolIdxIterator<'a, T> {
type Item = ObjPoolIndex<T>;
fn next(&mut self) -> Option<ObjPoolIndex<T>> {
if self.current_idx == self.pool.storage.len() {
None
} else |
}
}
pub struct ObjPoolMutIdxIterator<'a, T: 'a> {
inner_iter: IterMut<'a, T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolMutIdxIterator<'a, T> {
type Item = (ObjPoolIndex<T>, &'a mut T);
fn next(&mut self) -> Option<Self::Item> {
let next = self.inner_iter.next();
match next {
None => None,
Some(x) => {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some((ret, x))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Default)]
struct ObjPoolTestObject {
foo: u32
}
#[test]
fn objpool_basic_works() {
let mut pool = ObjPool::<ObjPoolTestObject>::new();
let x = pool.alloc();
let y = pool.alloc();
{
let o = pool.get_mut(x);
o.foo = 123;
}
{
let o = pool.get_mut(y);
o.foo = 456;
}
let ox = pool.get(x);
let oy = pool.get(y);
assert_eq!(ox.foo, 123);
assert_eq!(oy.foo, 456);
}
}
| {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some(ret)
} | conditional_block |
objpool.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::slice::{Iter, IterMut};
use serde_derive::{Deserialize, Serialize};
use slog;
#[derive(Debug, Serialize, Deserialize)]
pub struct ObjPoolIndex<T> {
i: usize,
type_marker: PhantomData<T>
}
impl<T> Copy for ObjPoolIndex<T> { }
impl<T> Clone for ObjPoolIndex<T> {
fn clone(&self) -> ObjPoolIndex<T> {
*self
}
}
impl<T> PartialEq for ObjPoolIndex<T> {
fn | (&self, other: &ObjPoolIndex<T>) -> bool {
self.i == other.i
}
}
impl<T> Eq for ObjPoolIndex<T> { }
impl<T> Hash for ObjPoolIndex<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.i.hash(state);
}
}
impl<T> ObjPoolIndex<T> {
pub fn get_raw_i(&self) -> usize {
self.i
}
}
impl<T> slog::Value for ObjPoolIndex<T> {
fn serialize(&self, _record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer) -> slog::Result {
serializer.emit_usize(key, self.i)
}
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub struct ObjPool<T> {
storage: Vec<T>
}
impl<T> ObjPool<T> {
pub fn new() -> ObjPool<T> {
ObjPool {storage: Vec::new()}
}
pub fn insert(&mut self, item: T) -> ObjPoolIndex<T> {
let i = self.storage.len();
self.storage.push(item);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
pub fn get(&self, i: ObjPoolIndex<T>) -> &T {
&self.storage[i.i]
}
pub fn get_mut(&mut self, i: ObjPoolIndex<T>) -> &mut T {
&mut self.storage[i.i]
}
pub fn iter_idx(&self) -> ObjPoolIdxIterator<T> {
ObjPoolIdxIterator {
pool: self,
current_idx: 0,
}
}
pub fn iter(&self) -> Iter<T> {
self.storage.iter()
}
pub fn iter_mut(&mut self) -> IterMut<T> {
self.storage.iter_mut()
}
pub fn iter_mut_idx(&mut self) -> ObjPoolMutIdxIterator<T> {
ObjPoolMutIdxIterator {
inner_iter: self.storage.iter_mut(),
current_idx: 0,
}
}
pub fn len(&self) -> usize {
self.storage.len()
}
}
impl<T: Default> ObjPool<T> {
pub fn alloc(&mut self) -> ObjPoolIndex<T> {
let i = self.storage.len();
let o = T::default();
self.storage.push(o);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
}
pub struct ObjPoolIdxIterator<'a, T: 'a> {
pool: &'a ObjPool<T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolIdxIterator<'a, T> {
type Item = ObjPoolIndex<T>;
fn next(&mut self) -> Option<ObjPoolIndex<T>> {
if self.current_idx == self.pool.storage.len() {
None
} else {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some(ret)
}
}
}
pub struct ObjPoolMutIdxIterator<'a, T: 'a> {
inner_iter: IterMut<'a, T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolMutIdxIterator<'a, T> {
type Item = (ObjPoolIndex<T>, &'a mut T);
fn next(&mut self) -> Option<Self::Item> {
let next = self.inner_iter.next();
match next {
None => None,
Some(x) => {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some((ret, x))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Default)]
struct ObjPoolTestObject {
foo: u32
}
#[test]
fn objpool_basic_works() {
let mut pool = ObjPool::<ObjPoolTestObject>::new();
let x = pool.alloc();
let y = pool.alloc();
{
let o = pool.get_mut(x);
o.foo = 123;
}
{
let o = pool.get_mut(y);
o.foo = 456;
}
let ox = pool.get(x);
let oy = pool.get(y);
assert_eq!(ox.foo, 123);
assert_eq!(oy.foo, 456);
}
}
| eq | identifier_name |
objpool.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::slice::{Iter, IterMut};
use serde_derive::{Deserialize, Serialize};
use slog;
#[derive(Debug, Serialize, Deserialize)]
pub struct ObjPoolIndex<T> {
i: usize,
type_marker: PhantomData<T>
}
impl<T> Copy for ObjPoolIndex<T> { }
impl<T> Clone for ObjPoolIndex<T> {
fn clone(&self) -> ObjPoolIndex<T> {
*self
}
}
impl<T> PartialEq for ObjPoolIndex<T> {
fn eq(&self, other: &ObjPoolIndex<T>) -> bool {
self.i == other.i
}
}
impl<T> Eq for ObjPoolIndex<T> { }
impl<T> Hash for ObjPoolIndex<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.i.hash(state);
}
}
impl<T> ObjPoolIndex<T> {
pub fn get_raw_i(&self) -> usize {
self.i
}
}
impl<T> slog::Value for ObjPoolIndex<T> {
fn serialize(&self, _record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer) -> slog::Result {
serializer.emit_usize(key, self.i)
}
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub struct ObjPool<T> {
storage: Vec<T>
}
impl<T> ObjPool<T> {
pub fn new() -> ObjPool<T> {
ObjPool {storage: Vec::new()}
}
pub fn insert(&mut self, item: T) -> ObjPoolIndex<T> {
let i = self.storage.len();
self.storage.push(item);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
pub fn get(&self, i: ObjPoolIndex<T>) -> &T {
&self.storage[i.i]
}
pub fn get_mut(&mut self, i: ObjPoolIndex<T>) -> &mut T {
&mut self.storage[i.i]
}
pub fn iter_idx(&self) -> ObjPoolIdxIterator<T> {
ObjPoolIdxIterator {
pool: self,
current_idx: 0,
}
}
pub fn iter(&self) -> Iter<T> {
self.storage.iter()
}
pub fn iter_mut(&mut self) -> IterMut<T> {
self.storage.iter_mut()
}
pub fn iter_mut_idx(&mut self) -> ObjPoolMutIdxIterator<T> {
ObjPoolMutIdxIterator {
inner_iter: self.storage.iter_mut(),
current_idx: 0,
}
}
pub fn len(&self) -> usize {
self.storage.len()
}
}
impl<T: Default> ObjPool<T> {
pub fn alloc(&mut self) -> ObjPoolIndex<T> {
let i = self.storage.len();
let o = T::default();
self.storage.push(o);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
}
pub struct ObjPoolIdxIterator<'a, T: 'a> {
pool: &'a ObjPool<T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolIdxIterator<'a, T> {
type Item = ObjPoolIndex<T>;
fn next(&mut self) -> Option<ObjPoolIndex<T>> {
if self.current_idx == self.pool.storage.len() {
None
} else {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some(ret)
}
}
}
pub struct ObjPoolMutIdxIterator<'a, T: 'a> {
inner_iter: IterMut<'a, T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolMutIdxIterator<'a, T> {
type Item = (ObjPoolIndex<T>, &'a mut T); | let next = self.inner_iter.next();
match next {
None => None,
Some(x) => {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some((ret, x))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Default)]
struct ObjPoolTestObject {
foo: u32
}
#[test]
fn objpool_basic_works() {
let mut pool = ObjPool::<ObjPoolTestObject>::new();
let x = pool.alloc();
let y = pool.alloc();
{
let o = pool.get_mut(x);
o.foo = 123;
}
{
let o = pool.get_mut(y);
o.foo = 456;
}
let ox = pool.get(x);
let oy = pool.get(y);
assert_eq!(ox.foo, 123);
assert_eq!(oy.foo, 456);
}
} |
fn next(&mut self) -> Option<Self::Item> { | random_line_split |
trait-inheritance2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | trait Bar { fn g(&self) -> int; }
trait Baz { fn h(&self) -> int; }
trait Quux: Foo + Bar + Baz { }
struct A { x: int }
impl Foo for A { fn f(&self) -> int { 10 } }
impl Bar for A { fn g(&self) -> int { 20 } }
impl Baz for A { fn h(&self) -> int { 30 } }
impl Quux for A {}
fn f<T:Quux + Foo + Bar + Baz>(a: &T) {
assert_eq!(a.f(), 10);
assert_eq!(a.g(), 20);
assert_eq!(a.h(), 30);
}
pub fn main() {
let a = &A { x: 3 };
f(a);
} | // option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo { fn f(&self) -> int; } | random_line_split |
trait-inheritance2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo { fn f(&self) -> int; }
trait Bar { fn g(&self) -> int; }
trait Baz { fn h(&self) -> int; }
trait Quux: Foo + Bar + Baz { }
struct A { x: int }
impl Foo for A { fn f(&self) -> int { 10 } }
impl Bar for A { fn g(&self) -> int { 20 } }
impl Baz for A { fn | (&self) -> int { 30 } }
impl Quux for A {}
fn f<T:Quux + Foo + Bar + Baz>(a: &T) {
assert_eq!(a.f(), 10);
assert_eq!(a.g(), 20);
assert_eq!(a.h(), 30);
}
pub fn main() {
let a = &A { x: 3 };
f(a);
}
| h | identifier_name |
trait-inheritance2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo { fn f(&self) -> int; }
trait Bar { fn g(&self) -> int; }
trait Baz { fn h(&self) -> int; }
trait Quux: Foo + Bar + Baz { }
struct A { x: int }
impl Foo for A { fn f(&self) -> int | }
impl Bar for A { fn g(&self) -> int { 20 } }
impl Baz for A { fn h(&self) -> int { 30 } }
impl Quux for A {}
fn f<T:Quux + Foo + Bar + Baz>(a: &T) {
assert_eq!(a.f(), 10);
assert_eq!(a.g(), 20);
assert_eq!(a.h(), 30);
}
pub fn main() {
let a = &A { x: 3 };
f(a);
}
| { 10 } | identifier_body |
benchmarks.rs | /*
* Copyright 2020 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at | * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#[macro_use]
extern crate bencher;
extern crate flatbuffers;
extern crate flexbuffers;
mod flatbuffers_benchmarks;
mod flexbuffers_benchmarks;
#[allow(dead_code, unused_imports)]
#[path = "../../include_test/include_test1_generated.rs"]
pub mod include_test1_generated;
#[allow(dead_code, unused_imports)]
#[path = "../../include_test/sub/include_test2_generated.rs"]
pub mod include_test2_generated;
#[allow(dead_code, unused_imports)]
#[path = "../../monster_test_generated.rs"]
mod monster_test_generated;
pub use monster_test_generated::my_game;
benchmark_main!(
flatbuffers_benchmarks::benches,
flexbuffers_benchmarks::benches
); | *
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software | random_line_split |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A collection of numeric types and traits for Rust.
//!
//! This includes new types for big integers, rationals, and complex numbers,
//! new traits for generic programming on numeric properties like `Integer`,
//! and generic range iterators.
//!
//! ## Example
//!
//! This example uses the BigRational type and [Newton's method][newt] to
//! approximate a square root to arbitrary precision:
//!
//! ```
//! extern crate num;
//! # #[cfg(all(feature = "bigint", feature="rational"))]
//! # mod test {
//!
//! use num::FromPrimitive;
//! use num::bigint::BigInt;
//! use num::rational::{Ratio, BigRational};
//!
//! # pub
//! fn approx_sqrt(number: u64, iterations: usize) -> BigRational {
//! let start: Ratio<BigInt> = Ratio::from_integer(FromPrimitive::from_u64(number).unwrap());
//! let mut approx = start.clone();
//!
//! for _ in 0..iterations {
//! approx = (&approx + (&start / &approx)) /
//! Ratio::from_integer(FromPrimitive::from_u64(2).unwrap());
//! }
//!
//! approx
//! }
//! # }
//! # #[cfg(not(all(feature = "bigint", feature="rational")))]
//! # mod test { pub fn approx_sqrt(n: u64, _: usize) -> u64 { n } }
//! # use test::approx_sqrt;
//!
//! fn main() {
//! println!("{}", approx_sqrt(10, 4)); // prints 4057691201/1283082416
//! }
//!
//! ```
//!
//! [newt]: https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
#![doc(html_logo_url = "http://rust-num.github.io/num/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://rust-num.github.io/num/favicon.ico",
html_root_url = "http://rust-num.github.io/num/",
html_playground_url = "http://play.rust-lang.org/")]
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize;
// Some of the tests of non-RNG-based functionality are randomized using the
// RNG-based functionality, so the RNG-based functionality needs to be enabled
// for tests.
#[cfg(any(feature = "rand", all(feature = "bigint", test)))]
extern crate rand;
#[cfg(feature = "bigint")]
pub use bigint::{BigInt, BigUint};
#[cfg(feature = "rational")]
pub use rational::Rational;
#[cfg(all(feature = "rational", feature="bigint"))]
pub use rational::BigRational;
#[cfg(feature = "complex")]
pub use complex::Complex;
pub use integer::Integer;
pub use iter::{range, range_inclusive, range_step, range_step_inclusive};
pub use traits::{Num, Zero, One, Signed, Unsigned, Bounded,
Saturating, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv,
PrimInt, Float, ToPrimitive, FromPrimitive, NumCast, cast};
#[cfg(test)] use std::hash;
use std::ops::{Mul};
#[cfg(feature = "bigint")]
pub mod bigint;
pub mod complex;
pub mod integer;
pub mod iter;
pub mod traits;
#[cfg(feature = "rational")]
pub mod rational;
/// Returns the additive identity, `0`.
#[inline(always)] pub fn zero<T: Zero>() -> T { Zero::zero() }
/// Returns the multiplicative identity, `1`.
#[inline(always)] pub fn one<T: One>() -> T { One::one() }
/// Computes the absolute value.
///
/// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`
///
/// For signed integers, `::MIN` will be returned if the number is `::MIN`.
#[inline(always)]
pub fn abs<T: Signed>(value: T) -> T {
value.abs()
}
/// The positive difference of two numbers.
///
/// Returns zero if `x` is less than or equal to `y`, otherwise the difference
/// between `x` and `y` is returned.
#[inline(always)]
pub fn abs_sub<T: Signed>(x: T, y: T) -> T {
x.abs_sub(&y)
}
/// Returns the sign of the number.
///
/// For `f32` and `f64`:
///
/// * `1.0` if the number is positive, `+0.0` or `INFINITY`
/// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
/// * `NaN` if the number is `NaN`
///
/// For signed integers:
///
/// * `0` if the number is zero
/// * `1` if the number is positive
/// * `-1` if the number is negative
#[inline(always)] pub fn signum<T: Signed>(value: T) -> T { value.signum() }
/// Raises a value to the power of exp, using exponentiation by squaring.
///
/// # Example
///
/// ```rust
/// use num;
///
/// assert_eq!(num::pow(2i8, 4), 16);
/// assert_eq!(num::pow(6u8, 3), 216);
/// ```
#[inline]
pub fn pow<T: Clone + One + Mul<T, Output = T>>(mut base: T, mut exp: usize) -> T {
if exp == 0 { return T::one() }
while exp & 1 == 0 {
base = base.clone() * base;
exp >>= 1;
}
if exp == 1 { return base }
let mut acc = base.clone();
while exp > 1 {
exp >>= 1;
base = base.clone() * base;
if exp & 1 == 1 {
acc = acc * base.clone();
}
}
acc
}
#[cfg(test)]
fn hash<T: hash::Hash>(x: &T) -> u64 | {
use std::hash::Hasher;
let mut hasher = hash::SipHasher::new();
x.hash(&mut hasher);
hasher.finish()
} | identifier_body |
|
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A collection of numeric types and traits for Rust.
//!
//! This includes new types for big integers, rationals, and complex numbers,
//! new traits for generic programming on numeric properties like `Integer`,
//! and generic range iterators.
//!
//! ## Example
//! | //! ```
//! extern crate num;
//! # #[cfg(all(feature = "bigint", feature="rational"))]
//! # mod test {
//!
//! use num::FromPrimitive;
//! use num::bigint::BigInt;
//! use num::rational::{Ratio, BigRational};
//!
//! # pub
//! fn approx_sqrt(number: u64, iterations: usize) -> BigRational {
//! let start: Ratio<BigInt> = Ratio::from_integer(FromPrimitive::from_u64(number).unwrap());
//! let mut approx = start.clone();
//!
//! for _ in 0..iterations {
//! approx = (&approx + (&start / &approx)) /
//! Ratio::from_integer(FromPrimitive::from_u64(2).unwrap());
//! }
//!
//! approx
//! }
//! # }
//! # #[cfg(not(all(feature = "bigint", feature="rational")))]
//! # mod test { pub fn approx_sqrt(n: u64, _: usize) -> u64 { n } }
//! # use test::approx_sqrt;
//!
//! fn main() {
//! println!("{}", approx_sqrt(10, 4)); // prints 4057691201/1283082416
//! }
//!
//! ```
//!
//! [newt]: https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
#![doc(html_logo_url = "http://rust-num.github.io/num/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://rust-num.github.io/num/favicon.ico",
html_root_url = "http://rust-num.github.io/num/",
html_playground_url = "http://play.rust-lang.org/")]
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize;
// Some of the tests of non-RNG-based functionality are randomized using the
// RNG-based functionality, so the RNG-based functionality needs to be enabled
// for tests.
#[cfg(any(feature = "rand", all(feature = "bigint", test)))]
extern crate rand;
#[cfg(feature = "bigint")]
pub use bigint::{BigInt, BigUint};
#[cfg(feature = "rational")]
pub use rational::Rational;
#[cfg(all(feature = "rational", feature="bigint"))]
pub use rational::BigRational;
#[cfg(feature = "complex")]
pub use complex::Complex;
pub use integer::Integer;
pub use iter::{range, range_inclusive, range_step, range_step_inclusive};
pub use traits::{Num, Zero, One, Signed, Unsigned, Bounded,
Saturating, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv,
PrimInt, Float, ToPrimitive, FromPrimitive, NumCast, cast};
#[cfg(test)] use std::hash;
use std::ops::{Mul};
#[cfg(feature = "bigint")]
pub mod bigint;
pub mod complex;
pub mod integer;
pub mod iter;
pub mod traits;
#[cfg(feature = "rational")]
pub mod rational;
/// Returns the additive identity, `0`.
#[inline(always)] pub fn zero<T: Zero>() -> T { Zero::zero() }
/// Returns the multiplicative identity, `1`.
#[inline(always)] pub fn one<T: One>() -> T { One::one() }
/// Computes the absolute value.
///
/// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`
///
/// For signed integers, `::MIN` will be returned if the number is `::MIN`.
#[inline(always)]
pub fn abs<T: Signed>(value: T) -> T {
value.abs()
}
/// The positive difference of two numbers.
///
/// Returns zero if `x` is less than or equal to `y`, otherwise the difference
/// between `x` and `y` is returned.
#[inline(always)]
pub fn abs_sub<T: Signed>(x: T, y: T) -> T {
x.abs_sub(&y)
}
/// Returns the sign of the number.
///
/// For `f32` and `f64`:
///
/// * `1.0` if the number is positive, `+0.0` or `INFINITY`
/// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
/// * `NaN` if the number is `NaN`
///
/// For signed integers:
///
/// * `0` if the number is zero
/// * `1` if the number is positive
/// * `-1` if the number is negative
#[inline(always)] pub fn signum<T: Signed>(value: T) -> T { value.signum() }
/// Raises a value to the power of exp, using exponentiation by squaring.
///
/// # Example
///
/// ```rust
/// use num;
///
/// assert_eq!(num::pow(2i8, 4), 16);
/// assert_eq!(num::pow(6u8, 3), 216);
/// ```
#[inline]
pub fn pow<T: Clone + One + Mul<T, Output = T>>(mut base: T, mut exp: usize) -> T {
if exp == 0 { return T::one() }
while exp & 1 == 0 {
base = base.clone() * base;
exp >>= 1;
}
if exp == 1 { return base }
let mut acc = base.clone();
while exp > 1 {
exp >>= 1;
base = base.clone() * base;
if exp & 1 == 1 {
acc = acc * base.clone();
}
}
acc
}
#[cfg(test)]
fn hash<T: hash::Hash>(x: &T) -> u64 {
use std::hash::Hasher;
let mut hasher = hash::SipHasher::new();
x.hash(&mut hasher);
hasher.finish()
} | //! This example uses the BigRational type and [Newton's method][newt] to
//! approximate a square root to arbitrary precision:
//! | random_line_split |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A collection of numeric types and traits for Rust.
//!
//! This includes new types for big integers, rationals, and complex numbers,
//! new traits for generic programming on numeric properties like `Integer`,
//! and generic range iterators.
//!
//! ## Example
//!
//! This example uses the BigRational type and [Newton's method][newt] to
//! approximate a square root to arbitrary precision:
//!
//! ```
//! extern crate num;
//! # #[cfg(all(feature = "bigint", feature="rational"))]
//! # mod test {
//!
//! use num::FromPrimitive;
//! use num::bigint::BigInt;
//! use num::rational::{Ratio, BigRational};
//!
//! # pub
//! fn approx_sqrt(number: u64, iterations: usize) -> BigRational {
//! let start: Ratio<BigInt> = Ratio::from_integer(FromPrimitive::from_u64(number).unwrap());
//! let mut approx = start.clone();
//!
//! for _ in 0..iterations {
//! approx = (&approx + (&start / &approx)) /
//! Ratio::from_integer(FromPrimitive::from_u64(2).unwrap());
//! }
//!
//! approx
//! }
//! # }
//! # #[cfg(not(all(feature = "bigint", feature="rational")))]
//! # mod test { pub fn approx_sqrt(n: u64, _: usize) -> u64 { n } }
//! # use test::approx_sqrt;
//!
//! fn main() {
//! println!("{}", approx_sqrt(10, 4)); // prints 4057691201/1283082416
//! }
//!
//! ```
//!
//! [newt]: https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
#![doc(html_logo_url = "http://rust-num.github.io/num/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://rust-num.github.io/num/favicon.ico",
html_root_url = "http://rust-num.github.io/num/",
html_playground_url = "http://play.rust-lang.org/")]
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize;
// Some of the tests of non-RNG-based functionality are randomized using the
// RNG-based functionality, so the RNG-based functionality needs to be enabled
// for tests.
#[cfg(any(feature = "rand", all(feature = "bigint", test)))]
extern crate rand;
#[cfg(feature = "bigint")]
pub use bigint::{BigInt, BigUint};
#[cfg(feature = "rational")]
pub use rational::Rational;
#[cfg(all(feature = "rational", feature="bigint"))]
pub use rational::BigRational;
#[cfg(feature = "complex")]
pub use complex::Complex;
pub use integer::Integer;
pub use iter::{range, range_inclusive, range_step, range_step_inclusive};
pub use traits::{Num, Zero, One, Signed, Unsigned, Bounded,
Saturating, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv,
PrimInt, Float, ToPrimitive, FromPrimitive, NumCast, cast};
#[cfg(test)] use std::hash;
use std::ops::{Mul};
#[cfg(feature = "bigint")]
pub mod bigint;
pub mod complex;
pub mod integer;
pub mod iter;
pub mod traits;
#[cfg(feature = "rational")]
pub mod rational;
/// Returns the additive identity, `0`.
#[inline(always)] pub fn zero<T: Zero>() -> T { Zero::zero() }
/// Returns the multiplicative identity, `1`.
#[inline(always)] pub fn one<T: One>() -> T { One::one() }
/// Computes the absolute value.
///
/// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`
///
/// For signed integers, `::MIN` will be returned if the number is `::MIN`.
#[inline(always)]
pub fn abs<T: Signed>(value: T) -> T {
value.abs()
}
/// The positive difference of two numbers.
///
/// Returns zero if `x` is less than or equal to `y`, otherwise the difference
/// between `x` and `y` is returned.
#[inline(always)]
pub fn abs_sub<T: Signed>(x: T, y: T) -> T {
x.abs_sub(&y)
}
/// Returns the sign of the number.
///
/// For `f32` and `f64`:
///
/// * `1.0` if the number is positive, `+0.0` or `INFINITY`
/// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
/// * `NaN` if the number is `NaN`
///
/// For signed integers:
///
/// * `0` if the number is zero
/// * `1` if the number is positive
/// * `-1` if the number is negative
#[inline(always)] pub fn signum<T: Signed>(value: T) -> T { value.signum() }
/// Raises a value to the power of exp, using exponentiation by squaring.
///
/// # Example
///
/// ```rust
/// use num;
///
/// assert_eq!(num::pow(2i8, 4), 16);
/// assert_eq!(num::pow(6u8, 3), 216);
/// ```
#[inline]
pub fn pow<T: Clone + One + Mul<T, Output = T>>(mut base: T, mut exp: usize) -> T {
if exp == 0 |
while exp & 1 == 0 {
base = base.clone() * base;
exp >>= 1;
}
if exp == 1 { return base }
let mut acc = base.clone();
while exp > 1 {
exp >>= 1;
base = base.clone() * base;
if exp & 1 == 1 {
acc = acc * base.clone();
}
}
acc
}
#[cfg(test)]
fn hash<T: hash::Hash>(x: &T) -> u64 {
use std::hash::Hasher;
let mut hasher = hash::SipHasher::new();
x.hash(&mut hasher);
hasher.finish()
}
| { return T::one() } | conditional_block |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A collection of numeric types and traits for Rust.
//!
//! This includes new types for big integers, rationals, and complex numbers,
//! new traits for generic programming on numeric properties like `Integer`,
//! and generic range iterators.
//!
//! ## Example
//!
//! This example uses the BigRational type and [Newton's method][newt] to
//! approximate a square root to arbitrary precision:
//!
//! ```
//! extern crate num;
//! # #[cfg(all(feature = "bigint", feature="rational"))]
//! # mod test {
//!
//! use num::FromPrimitive;
//! use num::bigint::BigInt;
//! use num::rational::{Ratio, BigRational};
//!
//! # pub
//! fn approx_sqrt(number: u64, iterations: usize) -> BigRational {
//! let start: Ratio<BigInt> = Ratio::from_integer(FromPrimitive::from_u64(number).unwrap());
//! let mut approx = start.clone();
//!
//! for _ in 0..iterations {
//! approx = (&approx + (&start / &approx)) /
//! Ratio::from_integer(FromPrimitive::from_u64(2).unwrap());
//! }
//!
//! approx
//! }
//! # }
//! # #[cfg(not(all(feature = "bigint", feature="rational")))]
//! # mod test { pub fn approx_sqrt(n: u64, _: usize) -> u64 { n } }
//! # use test::approx_sqrt;
//!
//! fn main() {
//! println!("{}", approx_sqrt(10, 4)); // prints 4057691201/1283082416
//! }
//!
//! ```
//!
//! [newt]: https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
#![doc(html_logo_url = "http://rust-num.github.io/num/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://rust-num.github.io/num/favicon.ico",
html_root_url = "http://rust-num.github.io/num/",
html_playground_url = "http://play.rust-lang.org/")]
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize;
// Some of the tests of non-RNG-based functionality are randomized using the
// RNG-based functionality, so the RNG-based functionality needs to be enabled
// for tests.
#[cfg(any(feature = "rand", all(feature = "bigint", test)))]
extern crate rand;
#[cfg(feature = "bigint")]
pub use bigint::{BigInt, BigUint};
#[cfg(feature = "rational")]
pub use rational::Rational;
#[cfg(all(feature = "rational", feature="bigint"))]
pub use rational::BigRational;
#[cfg(feature = "complex")]
pub use complex::Complex;
pub use integer::Integer;
pub use iter::{range, range_inclusive, range_step, range_step_inclusive};
pub use traits::{Num, Zero, One, Signed, Unsigned, Bounded,
Saturating, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv,
PrimInt, Float, ToPrimitive, FromPrimitive, NumCast, cast};
#[cfg(test)] use std::hash;
use std::ops::{Mul};
#[cfg(feature = "bigint")]
pub mod bigint;
pub mod complex;
pub mod integer;
pub mod iter;
pub mod traits;
#[cfg(feature = "rational")]
pub mod rational;
/// Returns the additive identity, `0`.
#[inline(always)] pub fn zero<T: Zero>() -> T { Zero::zero() }
/// Returns the multiplicative identity, `1`.
#[inline(always)] pub fn | <T: One>() -> T { One::one() }
/// Computes the absolute value.
///
/// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`
///
/// For signed integers, `::MIN` will be returned if the number is `::MIN`.
#[inline(always)]
pub fn abs<T: Signed>(value: T) -> T {
value.abs()
}
/// The positive difference of two numbers.
///
/// Returns zero if `x` is less than or equal to `y`, otherwise the difference
/// between `x` and `y` is returned.
#[inline(always)]
pub fn abs_sub<T: Signed>(x: T, y: T) -> T {
x.abs_sub(&y)
}
/// Returns the sign of the number.
///
/// For `f32` and `f64`:
///
/// * `1.0` if the number is positive, `+0.0` or `INFINITY`
/// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
/// * `NaN` if the number is `NaN`
///
/// For signed integers:
///
/// * `0` if the number is zero
/// * `1` if the number is positive
/// * `-1` if the number is negative
#[inline(always)] pub fn signum<T: Signed>(value: T) -> T { value.signum() }
/// Raises a value to the power of exp, using exponentiation by squaring.
///
/// # Example
///
/// ```rust
/// use num;
///
/// assert_eq!(num::pow(2i8, 4), 16);
/// assert_eq!(num::pow(6u8, 3), 216);
/// ```
#[inline]
pub fn pow<T: Clone + One + Mul<T, Output = T>>(mut base: T, mut exp: usize) -> T {
if exp == 0 { return T::one() }
while exp & 1 == 0 {
base = base.clone() * base;
exp >>= 1;
}
if exp == 1 { return base }
let mut acc = base.clone();
while exp > 1 {
exp >>= 1;
base = base.clone() * base;
if exp & 1 == 1 {
acc = acc * base.clone();
}
}
acc
}
#[cfg(test)]
fn hash<T: hash::Hash>(x: &T) -> u64 {
use std::hash::Hasher;
let mut hasher = hash::SipHasher::new();
x.hash(&mut hasher);
hasher.finish()
}
| one | identifier_name |
main.rs | extern crate serenity;
use serenity::prelude::*;
use serenity::model::*;
use std::env;
struct Handler;
impl EventHandler for Handler {
// Set a handler for the `on_message` event - so that whenever a new message
// is received - the closure (or function) passed will be called.
//
// Event handlers are dispatched through multi-threading, and so multiple
// of a single event can be dispatched simultaneously.
fn on_message(&self, _: Context, msg: Message) {
if msg.content == "!ping" |
}
// Set a handler to be called on the `on_ready` event. This is called when a
// shard is booted, and a READY payload is sent by Discord. This payload
// contains data like the current user's guild Ids, current user data,
// private channels, and more.
//
// In this case, just print what the current user's username is.
fn on_ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN")
.expect("Expected a token in the environment");
// Create a new instance of the Client, logging in as a bot. This will
// automatically prepend your bot token with "Bot ", which is a requirement
// by Discord for bot users.
let mut client = Client::new(&token, Handler);
// Finally, start a single shard, and start listening to events.
//
// Shards will automatically attempt to reconnect, and will perform
// exponential backoff until it reconnects.
if let Err(why) = client.start() {
println!("Client error: {:?}", why);
}
}
| {
// Sending a message can fail, due to a network error, an
// authentication error, or lack of permissions to post in the
// channel, so log to stdout when some error happens, with a
// description of it.
if let Err(why) = msg.channel_id.say("Pong!") {
println!("Error sending message: {:?}", why);
}
} | conditional_block |
main.rs | extern crate serenity;
use serenity::prelude::*;
use serenity::model::*;
use std::env;
struct Handler;
impl EventHandler for Handler {
// Set a handler for the `on_message` event - so that whenever a new message
// is received - the closure (or function) passed will be called.
//
// Event handlers are dispatched through multi-threading, and so multiple
// of a single event can be dispatched simultaneously.
fn | (&self, _: Context, msg: Message) {
if msg.content == "!ping" {
// Sending a message can fail, due to a network error, an
// authentication error, or lack of permissions to post in the
// channel, so log to stdout when some error happens, with a
// description of it.
if let Err(why) = msg.channel_id.say("Pong!") {
println!("Error sending message: {:?}", why);
}
}
}
// Set a handler to be called on the `on_ready` event. This is called when a
// shard is booted, and a READY payload is sent by Discord. This payload
// contains data like the current user's guild Ids, current user data,
// private channels, and more.
//
// In this case, just print what the current user's username is.
fn on_ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN")
.expect("Expected a token in the environment");
// Create a new instance of the Client, logging in as a bot. This will
// automatically prepend your bot token with "Bot ", which is a requirement
// by Discord for bot users.
let mut client = Client::new(&token, Handler);
// Finally, start a single shard, and start listening to events.
//
// Shards will automatically attempt to reconnect, and will perform
// exponential backoff until it reconnects.
if let Err(why) = client.start() {
println!("Client error: {:?}", why);
}
}
| on_message | identifier_name |
main.rs | extern crate serenity;
use serenity::prelude::*;
use serenity::model::*;
use std::env;
struct Handler;
impl EventHandler for Handler {
// Set a handler for the `on_message` event - so that whenever a new message
// is received - the closure (or function) passed will be called.
//
// Event handlers are dispatched through multi-threading, and so multiple
// of a single event can be dispatched simultaneously.
fn on_message(&self, _: Context, msg: Message) |
// Set a handler to be called on the `on_ready` event. This is called when a
// shard is booted, and a READY payload is sent by Discord. This payload
// contains data like the current user's guild Ids, current user data,
// private channels, and more.
//
// In this case, just print what the current user's username is.
fn on_ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN")
.expect("Expected a token in the environment");
// Create a new instance of the Client, logging in as a bot. This will
// automatically prepend your bot token with "Bot ", which is a requirement
// by Discord for bot users.
let mut client = Client::new(&token, Handler);
// Finally, start a single shard, and start listening to events.
//
// Shards will automatically attempt to reconnect, and will perform
// exponential backoff until it reconnects.
if let Err(why) = client.start() {
println!("Client error: {:?}", why);
}
}
| {
if msg.content == "!ping" {
// Sending a message can fail, due to a network error, an
// authentication error, or lack of permissions to post in the
// channel, so log to stdout when some error happens, with a
// description of it.
if let Err(why) = msg.channel_id.say("Pong!") {
println!("Error sending message: {:?}", why);
}
}
} | identifier_body |
main.rs | extern crate serenity;
use serenity::prelude::*;
use serenity::model::*;
use std::env;
struct Handler;
impl EventHandler for Handler {
// Set a handler for the `on_message` event - so that whenever a new message
// is received - the closure (or function) passed will be called.
//
// Event handlers are dispatched through multi-threading, and so multiple
// of a single event can be dispatched simultaneously.
fn on_message(&self, _: Context, msg: Message) {
if msg.content == "!ping" {
// Sending a message can fail, due to a network error, an
// authentication error, or lack of permissions to post in the
// channel, so log to stdout when some error happens, with a
// description of it.
if let Err(why) = msg.channel_id.say("Pong!") {
println!("Error sending message: {:?}", why);
}
}
} | // Set a handler to be called on the `on_ready` event. This is called when a
// shard is booted, and a READY payload is sent by Discord. This payload
// contains data like the current user's guild Ids, current user data,
// private channels, and more.
//
// In this case, just print what the current user's username is.
fn on_ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN")
.expect("Expected a token in the environment");
// Create a new instance of the Client, logging in as a bot. This will
// automatically prepend your bot token with "Bot ", which is a requirement
// by Discord for bot users.
let mut client = Client::new(&token, Handler);
// Finally, start a single shard, and start listening to events.
//
// Shards will automatically attempt to reconnect, and will perform
// exponential backoff until it reconnects.
if let Err(why) = client.start() {
println!("Client error: {:?}", why);
}
} | random_line_split |
|
bin.rs | #[macro_use]
extern crate malachite_base_test_util;
extern crate malachite_nz;
extern crate malachite_nz_test_util;
extern crate malachite_q;
extern crate serde;
extern crate serde_json;
use crate::demo_and_bench::register;
use malachite_base_test_util::runner::cmd::read_command_line_arguments;
use malachite_base_test_util::runner::Runner;
// Examples:
//
// cargo run -- -l 10000 -m special_random -d demo_from_naturals -c "mean_bits_n 128 mean_bits_d 1"
fn main() {
let args = read_command_line_arguments("malachite-q test utils");
let mut runner = Runner::new();
register(&mut runner);
if let Some(demo_key) = args.demo_key {
runner.run_demo(&demo_key, args.generation_mode, args.config, args.limit);
} else if let Some(bench_key) = args.bench_key {
runner.run_bench(
&bench_key,
args.generation_mode,
args.config,
args.limit,
&args.out,
);
} else {
panic!();
} |
mod demo_and_bench; | } | random_line_split |
bin.rs | #[macro_use]
extern crate malachite_base_test_util;
extern crate malachite_nz;
extern crate malachite_nz_test_util;
extern crate malachite_q;
extern crate serde;
extern crate serde_json;
use crate::demo_and_bench::register;
use malachite_base_test_util::runner::cmd::read_command_line_arguments;
use malachite_base_test_util::runner::Runner;
// Examples:
//
// cargo run -- -l 10000 -m special_random -d demo_from_naturals -c "mean_bits_n 128 mean_bits_d 1"
fn | () {
let args = read_command_line_arguments("malachite-q test utils");
let mut runner = Runner::new();
register(&mut runner);
if let Some(demo_key) = args.demo_key {
runner.run_demo(&demo_key, args.generation_mode, args.config, args.limit);
} else if let Some(bench_key) = args.bench_key {
runner.run_bench(
&bench_key,
args.generation_mode,
args.config,
args.limit,
&args.out,
);
} else {
panic!();
}
}
mod demo_and_bench;
| main | identifier_name |
bin.rs | #[macro_use]
extern crate malachite_base_test_util;
extern crate malachite_nz;
extern crate malachite_nz_test_util;
extern crate malachite_q;
extern crate serde;
extern crate serde_json;
use crate::demo_and_bench::register;
use malachite_base_test_util::runner::cmd::read_command_line_arguments;
use malachite_base_test_util::runner::Runner;
// Examples:
//
// cargo run -- -l 10000 -m special_random -d demo_from_naturals -c "mean_bits_n 128 mean_bits_d 1"
fn main() {
let args = read_command_line_arguments("malachite-q test utils");
let mut runner = Runner::new();
register(&mut runner);
if let Some(demo_key) = args.demo_key {
runner.run_demo(&demo_key, args.generation_mode, args.config, args.limit);
} else if let Some(bench_key) = args.bench_key | else {
panic!();
}
}
mod demo_and_bench;
| {
runner.run_bench(
&bench_key,
args.generation_mode,
args.config,
args.limit,
&args.out,
);
} | conditional_block |
bin.rs | #[macro_use]
extern crate malachite_base_test_util;
extern crate malachite_nz;
extern crate malachite_nz_test_util;
extern crate malachite_q;
extern crate serde;
extern crate serde_json;
use crate::demo_and_bench::register;
use malachite_base_test_util::runner::cmd::read_command_line_arguments;
use malachite_base_test_util::runner::Runner;
// Examples:
//
// cargo run -- -l 10000 -m special_random -d demo_from_naturals -c "mean_bits_n 128 mean_bits_d 1"
fn main() |
mod demo_and_bench;
| {
let args = read_command_line_arguments("malachite-q test utils");
let mut runner = Runner::new();
register(&mut runner);
if let Some(demo_key) = args.demo_key {
runner.run_demo(&demo_key, args.generation_mode, args.config, args.limit);
} else if let Some(bench_key) = args.bench_key {
runner.run_bench(
&bench_key,
args.generation_mode,
args.config,
args.limit,
&args.out,
);
} else {
panic!();
}
} | identifier_body |
length_expr.rs | // Copyright (c) 2015 Robert Clipsham <[email protected]> | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: Only field names, constants, integers, basic arithmetic expressions (+ - * / %) and parentheses are allowed in the "length" attribute
#![feature(custom_attribute, plugin)]
#![plugin(pnet_macros)]
extern crate pnet;
#[packet]
pub struct PacketWithPayload {
banana: u8,
#[length = "banana + 7.5"]
var_length: Vec<u8>,
#[payload]
payload: Vec<u8>
}
fn main() {} | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | random_line_split |
length_expr.rs | // Copyright (c) 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: Only field names, constants, integers, basic arithmetic expressions (+ - * / %) and parentheses are allowed in the "length" attribute
#![feature(custom_attribute, plugin)]
#![plugin(pnet_macros)]
extern crate pnet;
#[packet]
pub struct PacketWithPayload {
banana: u8,
#[length = "banana + 7.5"]
var_length: Vec<u8>,
#[payload]
payload: Vec<u8>
}
fn main() | {} | identifier_body |
|
length_expr.rs | // Copyright (c) 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: Only field names, constants, integers, basic arithmetic expressions (+ - * / %) and parentheses are allowed in the "length" attribute
#![feature(custom_attribute, plugin)]
#![plugin(pnet_macros)]
extern crate pnet;
#[packet]
pub struct PacketWithPayload {
banana: u8,
#[length = "banana + 7.5"]
var_length: Vec<u8>,
#[payload]
payload: Vec<u8>
}
fn | () {}
| main | identifier_name |
lib.rs | // =================================================================
//
// * WARNING *
//
// This file is generated!
//
// Changes made to this file will be overwritten. If changes are | // must be updated to generate the changes.
//
// =================================================================
#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
//! <p>Use the AWS Elemental MediaTailor SDK to configure scalable ad insertion for your live and VOD content. With AWS Elemental MediaTailor, you can serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide.<p>Through the SDK, you manage AWS Elemental MediaTailor configurations the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS).</p>
//!
//! If you're using the service, you're probably looking for [MediaTailorClient](struct.MediaTailorClient.html) and [MediaTailor](trait.MediaTailor.html).
extern crate futures;
#[macro_use]
extern crate log;
extern crate rusoto_core;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
mod generated;
mod custom;
pub use generated::*;
pub use custom::*; | // required to the generated code, the service_crategen project | random_line_split |
issue-17913.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current architecture
#![feature(box_syntax)]
#[cfg(target_pointer_width = "64")]
fn main() {
let n = 0_usize;
let a: Box<_> = box [&n; 0xF000000000000000_usize];
println!("{}", a[0xFFFFFF_usize]);
}
#[cfg(target_pointer_width = "32")]
fn main() {
let n = 0_usize;
let a: Box<_> = box [&n; 0xFFFFFFFF_usize];
println!("{}", a[0xFFFFFF_usize]);
} | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | random_line_split |
issue-17913.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current architecture
#![feature(box_syntax)]
#[cfg(target_pointer_width = "64")]
fn main() |
#[cfg(target_pointer_width = "32")]
fn main() {
let n = 0_usize;
let a: Box<_> = box [&n; 0xFFFFFFFF_usize];
println!("{}", a[0xFFFFFF_usize]);
}
| {
let n = 0_usize;
let a: Box<_> = box [&n; 0xF000000000000000_usize];
println!("{}", a[0xFFFFFF_usize]);
} | identifier_body |
issue-17913.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current architecture
#![feature(box_syntax)]
#[cfg(target_pointer_width = "64")]
fn | () {
let n = 0_usize;
let a: Box<_> = box [&n; 0xF000000000000000_usize];
println!("{}", a[0xFFFFFF_usize]);
}
#[cfg(target_pointer_width = "32")]
fn main() {
let n = 0_usize;
let a: Box<_> = box [&n; 0xFFFFFFFF_usize];
println!("{}", a[0xFFFFFF_usize]);
}
| main | identifier_name |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::LayoutContext;
use css::matching::{ApplicableDeclarations, MatchMethods, StyleSharingResult};
use flow::{MutableFlowUtils, PostorderFlowTraversal, PreorderFlowTraversal};
use flow::{self, Flow};
use incremental::{self, BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, RestyleDamage};
use script::layout_interface::ReflowGoal;
use selectors::bloom::BloomFilter;
use std::cell::RefCell;
use std::mem;
use util::opts;
use util::tid::tid;
use wrapper::{LayoutNode, layout_node_to_unsafe_layout_node};
use wrapper::{ThreadSafeLayoutNode, UnsafeLayoutNode};
/// Every time we do another layout, the old bloom filters are invalid. This is
/// detected by ticking a generation number every layout.
type Generation = u32;
/// A pair of the bloom filter used for css selector matching, and the node to
/// which it applies. This is used to efficiently do `Descendant` selector
/// matches. Thanks to the bloom filter, we can avoid walking up the tree
/// looking for ancestors that aren't there in the majority of cases.
///
/// As we walk down the DOM tree a task-local bloom filter is built of all the
/// CSS `SimpleSelector`s which are part of a `Descendant` compound selector
/// (i.e. paired with a `Descendant` combinator, in the `next` field of a
/// `CompoundSelector`.
///
/// Before a `Descendant` selector match is tried, it's compared against the
/// bloom filter. If the bloom filter can exclude it, the selector is quickly
/// rejected.
///
/// When done styling a node, all selectors previously inserted into the filter
/// are removed.
///
/// Since a work-stealing queue is used for styling, sometimes, the bloom filter
/// will no longer be the for the parent of the node we're currently on. When
/// this happens, the task local bloom filter will be thrown away and rebuilt.
thread_local!(
static STYLE_BLOOM: RefCell<Option<(Box<BloomFilter>, UnsafeLayoutNode, Generation)>> = RefCell::new(None));
/// Returns the task local bloom filter.
///
/// If one does not exist, a new one will be made for you. If it is out of date,
/// it will be cleared and reused.
fn take_task_local_bloom_filter(parent_node: Option<LayoutNode>, layout_context: &LayoutContext)
-> Box<BloomFilter> {
STYLE_BLOOM.with(|style_bloom| {
match (parent_node, style_bloom.borrow_mut().take()) {
// Root node. Needs new bloom filter.
(None, _ ) => {
debug!("[{}] No parent, but new bloom filter!", tid());
box BloomFilter::new()
}
// No bloom filter for this thread yet.
(Some(parent), None) => {
let mut bloom_filter = box BloomFilter::new();
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, layout_context);
bloom_filter
}
// Found cached bloom filter.
(Some(parent), Some((mut bloom_filter, old_node, old_generation))) => {
if old_node == layout_node_to_unsafe_layout_node(&parent) &&
old_generation == layout_context.shared.generation {
// Hey, the cached parent is our parent! We can reuse the bloom filter.
debug!("[{}] Parent matches (={}). Reusing bloom filter.", tid(), old_node.0);
} else {
// Oh no. the cached parent is stale. I guess we need a new one. Reuse the existing
// allocation to avoid malloc churn.
bloom_filter.clear();
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, layout_context);
}
bloom_filter
},
}
})
}
fn put_task_local_bloom_filter(bf: Box<BloomFilter>,
unsafe_node: &UnsafeLayoutNode,
layout_context: &LayoutContext) {
STYLE_BLOOM.with(move |style_bloom| {
assert!(style_bloom.borrow().is_none(),
"Putting into a never-taken task-local bloom filter");
*style_bloom.borrow_mut() = Some((bf, *unsafe_node, layout_context.shared.generation));
})
}
/// "Ancestors" in this context is inclusive of ourselves.
fn insert_ancestors_into_bloom_filter(bf: &mut Box<BloomFilter>,
mut n: LayoutNode,
layout_context: &LayoutContext) {
debug!("[{}] Inserting ancestors.", tid());
let mut ancestors = 0;
loop {
ancestors += 1;
n.insert_into_bloom_filter(&mut **bf);
n = match n.layout_parent_node(layout_context.shared) {
None => break,
Some(p) => p,
};
}
debug!("[{}] Inserted {} ancestors.", tid(), ancestors);
}
/// A top-down traversal.
pub trait PreorderDomTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, node: LayoutNode);
}
/// A bottom-up traversal, with a optional in-order pass.
pub trait PostorderDomTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, node: LayoutNode);
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process<'a>(&'a mut self, node: &ThreadSafeLayoutNode<'a>) -> bool;
/// Returns true if this node should be pruned. If this returns true, we skip the operation
/// entirely and do not process any descendant nodes. This is called *before* child nodes are
/// visited. The default implementation never prunes any nodes.
fn should_prune<'a>(&'a self, _node: &ThreadSafeLayoutNode<'a>) -> bool {
false
}
}
/// The recalc-style-for-node traversal, which styles each node and must run before
/// layout computation. This computes the styles applied to each node.
#[derive(Copy, Clone)]
pub struct RecalcStyleForNode<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderDomTraversal for RecalcStyleForNode<'a> {
#[inline]
#[allow(unsafe_code)]
fn | (&self, node: LayoutNode) {
// Initialize layout data.
//
// FIXME(pcwalton): Stop allocating here. Ideally this should just be done by the HTML
// parser.
node.initialize_layout_data();
// Get the parent node.
let parent_opt = node.layout_parent_node(self.layout_context.shared);
// Get the style bloom filter.
let mut bf = take_task_local_bloom_filter(parent_opt, self.layout_context);
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.is_dirty() {
// Remove existing CSS styles from nodes whose content has changed (e.g. text changed),
// to force non-incremental reflow.
if node.has_changed() {
let node = ThreadSafeLayoutNode::new(&node);
node.unstyle();
}
// Check to see whether we can share a style with someone.
let style_sharing_candidate_cache =
&mut self.layout_context.style_sharing_candidate_cache();
let sharing_result = unsafe {
node.share_style_if_possible(style_sharing_candidate_cache,
parent_opt.clone())
};
// Otherwise, match and cascade selectors.
match sharing_result {
StyleSharingResult::CannotShare(mut shareable) => {
let mut applicable_declarations = ApplicableDeclarations::new();
if node.as_element().is_some() {
// Perform the CSS selector matching.
let stylist = unsafe { &*self.layout_context.shared.stylist };
node.match_node(stylist,
Some(&*bf),
&mut applicable_declarations,
&mut shareable);
} else if node.has_changed() {
ThreadSafeLayoutNode::new(&node).set_restyle_damage(
incremental::rebuild_and_reflow())
}
// Perform the CSS cascade.
unsafe {
node.cascade_node(self.layout_context.shared,
parent_opt,
&applicable_declarations,
&mut self.layout_context.applicable_declarations_cache(),
&self.layout_context.shared.new_animations_sender);
}
// Add ourselves to the LRU cache.
if shareable {
if let Some(element) = node.as_element() {
style_sharing_candidate_cache.insert_if_possible(&element);
}
}
}
StyleSharingResult::StyleWasShared(index, damage) => {
style_sharing_candidate_cache.touch(index);
ThreadSafeLayoutNode::new(&node).set_restyle_damage(damage);
}
}
}
let unsafe_layout_node = layout_node_to_unsafe_layout_node(&node);
// Before running the children, we need to insert our nodes into the bloom
// filter.
debug!("[{}] + {:X}", tid(), unsafe_layout_node.0);
node.insert_into_bloom_filter(&mut *bf);
// NB: flow construction updates the bloom filter on the way up.
put_task_local_bloom_filter(bf, &unsafe_layout_node, self.layout_context);
}
}
/// The flow construction traversal, which builds flows for styled nodes.
#[derive(Copy, Clone)]
pub struct ConstructFlows<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderDomTraversal for ConstructFlows<'a> {
#[inline]
#[allow(unsafe_code)]
fn process(&self, node: LayoutNode) {
// Construct flows for this node.
{
let tnode = ThreadSafeLayoutNode::new(&node);
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.has_dirty_descendants() {
let mut flow_constructor = FlowConstructor::new(self.layout_context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
// Reset the layout damage in this node. It's been propagated to the
// flow by the flow constructor.
tnode.set_restyle_damage(RestyleDamage::empty());
}
unsafe {
node.set_changed(false);
node.set_dirty(false);
node.set_dirty_siblings(false);
node.set_dirty_descendants(false);
}
let unsafe_layout_node = layout_node_to_unsafe_layout_node(&node);
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
mem::replace(&mut *style_bloom.borrow_mut(), None)
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, self.layout_context.shared.generation);
match node.layout_parent_node(self.layout_context.shared) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the task-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = layout_node_to_unsafe_layout_node(&parent);
put_task_local_bloom_filter(bf, &unsafe_parent, self.layout_context);
},
};
}
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects, computes
/// positions, and computes overflow regions. In Gecko this corresponds to `Reflow` and
/// `FinishAndStoreOverflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizesAndStoreOverflow<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizesAndStoreOverflow<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with flows impacted by floats until we reach their inorder parent.
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow::base(flow).flags.impacted_by_floats() {
return
}
flow.assign_block_size(self.layout_context);
flow.early_store_overflow(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
#[derive(Copy, Clone)]
pub struct BuildDisplayList<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BuildDisplayList<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.build_display_list(self.layout_context);
}
#[inline]
fn should_process(&self, _: &mut Flow) -> bool {
self.layout_context.shared.goal == ReflowGoal::ForDisplay
}
}
| process | identifier_name |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::LayoutContext;
use css::matching::{ApplicableDeclarations, MatchMethods, StyleSharingResult};
use flow::{MutableFlowUtils, PostorderFlowTraversal, PreorderFlowTraversal};
use flow::{self, Flow};
use incremental::{self, BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, RestyleDamage};
use script::layout_interface::ReflowGoal;
use selectors::bloom::BloomFilter;
use std::cell::RefCell;
use std::mem;
use util::opts;
use util::tid::tid;
use wrapper::{LayoutNode, layout_node_to_unsafe_layout_node};
use wrapper::{ThreadSafeLayoutNode, UnsafeLayoutNode};
/// Every time we do another layout, the old bloom filters are invalid. This is
/// detected by ticking a generation number every layout.
type Generation = u32;
/// A pair of the bloom filter used for css selector matching, and the node to
/// which it applies. This is used to efficiently do `Descendant` selector
/// matches. Thanks to the bloom filter, we can avoid walking up the tree
/// looking for ancestors that aren't there in the majority of cases.
///
/// As we walk down the DOM tree a task-local bloom filter is built of all the
/// CSS `SimpleSelector`s which are part of a `Descendant` compound selector
/// (i.e. paired with a `Descendant` combinator, in the `next` field of a
/// `CompoundSelector`.
///
/// Before a `Descendant` selector match is tried, it's compared against the
/// bloom filter. If the bloom filter can exclude it, the selector is quickly
/// rejected.
///
/// When done styling a node, all selectors previously inserted into the filter
/// are removed.
///
/// Since a work-stealing queue is used for styling, sometimes, the bloom filter
/// will no longer be the for the parent of the node we're currently on. When
/// this happens, the task local bloom filter will be thrown away and rebuilt.
thread_local!(
static STYLE_BLOOM: RefCell<Option<(Box<BloomFilter>, UnsafeLayoutNode, Generation)>> = RefCell::new(None));
/// Returns the task local bloom filter.
///
/// If one does not exist, a new one will be made for you. If it is out of date,
/// it will be cleared and reused.
fn take_task_local_bloom_filter(parent_node: Option<LayoutNode>, layout_context: &LayoutContext)
-> Box<BloomFilter> {
STYLE_BLOOM.with(|style_bloom| {
match (parent_node, style_bloom.borrow_mut().take()) {
// Root node. Needs new bloom filter.
(None, _ ) => {
debug!("[{}] No parent, but new bloom filter!", tid());
box BloomFilter::new()
}
// No bloom filter for this thread yet.
(Some(parent), None) => {
let mut bloom_filter = box BloomFilter::new();
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, layout_context);
bloom_filter
}
// Found cached bloom filter.
(Some(parent), Some((mut bloom_filter, old_node, old_generation))) => { | old_generation == layout_context.shared.generation {
// Hey, the cached parent is our parent! We can reuse the bloom filter.
debug!("[{}] Parent matches (={}). Reusing bloom filter.", tid(), old_node.0);
} else {
// Oh no. the cached parent is stale. I guess we need a new one. Reuse the existing
// allocation to avoid malloc churn.
bloom_filter.clear();
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, layout_context);
}
bloom_filter
},
}
})
}
fn put_task_local_bloom_filter(bf: Box<BloomFilter>,
unsafe_node: &UnsafeLayoutNode,
layout_context: &LayoutContext) {
STYLE_BLOOM.with(move |style_bloom| {
assert!(style_bloom.borrow().is_none(),
"Putting into a never-taken task-local bloom filter");
*style_bloom.borrow_mut() = Some((bf, *unsafe_node, layout_context.shared.generation));
})
}
/// "Ancestors" in this context is inclusive of ourselves.
fn insert_ancestors_into_bloom_filter(bf: &mut Box<BloomFilter>,
mut n: LayoutNode,
layout_context: &LayoutContext) {
debug!("[{}] Inserting ancestors.", tid());
let mut ancestors = 0;
loop {
ancestors += 1;
n.insert_into_bloom_filter(&mut **bf);
n = match n.layout_parent_node(layout_context.shared) {
None => break,
Some(p) => p,
};
}
debug!("[{}] Inserted {} ancestors.", tid(), ancestors);
}
/// A top-down traversal.
pub trait PreorderDomTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, node: LayoutNode);
}
/// A bottom-up traversal, with a optional in-order pass.
pub trait PostorderDomTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, node: LayoutNode);
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process<'a>(&'a mut self, node: &ThreadSafeLayoutNode<'a>) -> bool;
/// Returns true if this node should be pruned. If this returns true, we skip the operation
/// entirely and do not process any descendant nodes. This is called *before* child nodes are
/// visited. The default implementation never prunes any nodes.
fn should_prune<'a>(&'a self, _node: &ThreadSafeLayoutNode<'a>) -> bool {
false
}
}
/// The recalc-style-for-node traversal, which styles each node and must run before
/// layout computation. This computes the styles applied to each node.
#[derive(Copy, Clone)]
pub struct RecalcStyleForNode<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderDomTraversal for RecalcStyleForNode<'a> {
#[inline]
#[allow(unsafe_code)]
fn process(&self, node: LayoutNode) {
// Initialize layout data.
//
// FIXME(pcwalton): Stop allocating here. Ideally this should just be done by the HTML
// parser.
node.initialize_layout_data();
// Get the parent node.
let parent_opt = node.layout_parent_node(self.layout_context.shared);
// Get the style bloom filter.
let mut bf = take_task_local_bloom_filter(parent_opt, self.layout_context);
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.is_dirty() {
// Remove existing CSS styles from nodes whose content has changed (e.g. text changed),
// to force non-incremental reflow.
if node.has_changed() {
let node = ThreadSafeLayoutNode::new(&node);
node.unstyle();
}
// Check to see whether we can share a style with someone.
let style_sharing_candidate_cache =
&mut self.layout_context.style_sharing_candidate_cache();
let sharing_result = unsafe {
node.share_style_if_possible(style_sharing_candidate_cache,
parent_opt.clone())
};
// Otherwise, match and cascade selectors.
match sharing_result {
StyleSharingResult::CannotShare(mut shareable) => {
let mut applicable_declarations = ApplicableDeclarations::new();
if node.as_element().is_some() {
// Perform the CSS selector matching.
let stylist = unsafe { &*self.layout_context.shared.stylist };
node.match_node(stylist,
Some(&*bf),
&mut applicable_declarations,
&mut shareable);
} else if node.has_changed() {
ThreadSafeLayoutNode::new(&node).set_restyle_damage(
incremental::rebuild_and_reflow())
}
// Perform the CSS cascade.
unsafe {
node.cascade_node(self.layout_context.shared,
parent_opt,
&applicable_declarations,
&mut self.layout_context.applicable_declarations_cache(),
&self.layout_context.shared.new_animations_sender);
}
// Add ourselves to the LRU cache.
if shareable {
if let Some(element) = node.as_element() {
style_sharing_candidate_cache.insert_if_possible(&element);
}
}
}
StyleSharingResult::StyleWasShared(index, damage) => {
style_sharing_candidate_cache.touch(index);
ThreadSafeLayoutNode::new(&node).set_restyle_damage(damage);
}
}
}
let unsafe_layout_node = layout_node_to_unsafe_layout_node(&node);
// Before running the children, we need to insert our nodes into the bloom
// filter.
debug!("[{}] + {:X}", tid(), unsafe_layout_node.0);
node.insert_into_bloom_filter(&mut *bf);
// NB: flow construction updates the bloom filter on the way up.
put_task_local_bloom_filter(bf, &unsafe_layout_node, self.layout_context);
}
}
/// The flow construction traversal, which builds flows for styled nodes.
#[derive(Copy, Clone)]
pub struct ConstructFlows<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderDomTraversal for ConstructFlows<'a> {
#[inline]
#[allow(unsafe_code)]
fn process(&self, node: LayoutNode) {
// Construct flows for this node.
{
let tnode = ThreadSafeLayoutNode::new(&node);
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.has_dirty_descendants() {
let mut flow_constructor = FlowConstructor::new(self.layout_context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
// Reset the layout damage in this node. It's been propagated to the
// flow by the flow constructor.
tnode.set_restyle_damage(RestyleDamage::empty());
}
unsafe {
node.set_changed(false);
node.set_dirty(false);
node.set_dirty_siblings(false);
node.set_dirty_descendants(false);
}
let unsafe_layout_node = layout_node_to_unsafe_layout_node(&node);
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
mem::replace(&mut *style_bloom.borrow_mut(), None)
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, self.layout_context.shared.generation);
match node.layout_parent_node(self.layout_context.shared) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the task-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = layout_node_to_unsafe_layout_node(&parent);
put_task_local_bloom_filter(bf, &unsafe_parent, self.layout_context);
},
};
}
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects, computes
/// positions, and computes overflow regions. In Gecko this corresponds to `Reflow` and
/// `FinishAndStoreOverflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizesAndStoreOverflow<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizesAndStoreOverflow<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with flows impacted by floats until we reach their inorder parent.
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow::base(flow).flags.impacted_by_floats() {
return
}
flow.assign_block_size(self.layout_context);
flow.early_store_overflow(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
#[derive(Copy, Clone)]
pub struct BuildDisplayList<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BuildDisplayList<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.build_display_list(self.layout_context);
}
#[inline]
fn should_process(&self, _: &mut Flow) -> bool {
self.layout_context.shared.goal == ReflowGoal::ForDisplay
}
} | if old_node == layout_node_to_unsafe_layout_node(&parent) && | random_line_split |
issue-20727-2.rs | // aux-build:issue-20727.rs
// ignore-cross-compile
extern crate issue_20727;
| // @has - '//*[@class="rust trait"]' 'trait Add<RHS = Self> {'
// @has - '//*[@class="rust trait"]' 'type Output;'
type Output;
// @has - '//*[@class="rust trait"]' 'fn add(self, rhs: RHS) -> Self::Output;'
fn add(self, rhs: RHS) -> Self::Output;
}
// @has issue_20727_2/reexport/trait.Add.html
pub mod reexport {
// @has - '//*[@class="rust trait"]' 'trait Add<RHS = Self> {'
// @has - '//*[@class="rust trait"]' 'type Output;'
// @has - '//*[@class="rust trait"]' 'fn add(self, rhs: RHS) -> Self::Output;'
pub use issue_20727::Add;
} | // @has issue_20727_2/trait.Add.html
pub trait Add<RHS = Self> { | random_line_split |
text_box.rs | use super::behaviors::{TextAction, TextBehavior};
use crate::{api::prelude::*, prelude::*, proc_macros::*, themes::theme_orbtk::*};
// --- KEYS --
pub static STYLE_TEXT_BOX: &str = "text_box";
static ID_CURSOR: &str = "id_cursor";
// --- KEYS --
widget!(
/// The `TextBox` widget represents a single line text input widget.
///
/// * style: `text_box`
TextBox: ActivateHandler,
KeyDownHandler,
TextInputHandler {
/// Sets or shares the text property.
text: String,
/// Sets or shares the water_mark text property.
water_mark: String,
/// Sets or shares the text selection property.
selection: TextSelection,
/// Sets or shares the foreground property.
foreground: Brush,
/// Sets or shares the font size property.
font_size: f64,
/// Sets or shares the font property.
font: String,
/// Sets or shares the background property.
background: Brush,
/// Sets or shares the border radius property.
border_radius: f64,
/// Sets or shares the border thickness property.
border_width: Thickness,
/// Sets or shares the border brush property.
border_brush: Brush,
/// Sets or shares the padding property.
padding: Thickness,
/// Sets or shares the focused property.
focused: bool,
/// Sets or shares ta value that describes if the TextBox should lose focus on activation (enter).
lose_focus_on_activation: bool,
/// Used to request focus from outside. Set to `true` tor request focus.
request_focus: bool,
/// If set to `true` all character will be focused when the widget gets focus. Default is `true`
select_all_on_focus: bool,
/// Indicates if the widget is hovered by the mouse cursor.
hover: bool
}
);
impl Template for TextBox {
fn | (self, id: Entity, ctx: &mut BuildContext) -> Self {
let text_block = TextBlock::new()
.v_align("center")
.h_align("start")
.foreground(id)
.text(id)
.water_mark(id)
.font(id)
.font_size(id)
.localizable(false)
.build(ctx);
let cursor = Cursor::new().id(ID_CURSOR).selection(id).build(ctx);
let text_behavior = TextBehavior::new()
.cursor(cursor.0)
.target(id.0)
.text_block(text_block.0)
.focused(id)
.font(id)
.font_size(id)
.lose_focus_on_activation(id)
.select_all_on_focus(id)
.request_focus(id)
.text(id)
.selection(id)
.build(ctx);
self.name("TextBox")
.style(STYLE_TEXT_BOX)
.background(colors::LYNCH_COLOR)
.border_brush("transparent")
.border_width(0.0)
.border_radius(2.0)
.focused(false)
.font_size(orbtk_fonts::FONT_SIZE_12)
.font("Roboto-Regular")
.foreground(colors::LINK_WATER_COLOR)
.height(32.0)
.lose_focus_on_activation(true)
.min_width(128.0)
.padding(4.0)
.select_all_on_focus(true)
.selection(TextSelection::default())
.text("")
.child(text_behavior)
.child(
Container::new()
.background(id)
.border_radius(id)
.border_width(id)
.border_brush(id)
.padding(id)
.child(
Grid::new()
.clip(true)
.child(cursor)
.child(text_block)
.build(ctx),
)
.build(ctx),
)
.on_changed("text", move |ctx, _| {
ctx.send_message(TextAction::ForceUpdate(false), text_behavior);
})
}
}
| template | identifier_name |
text_box.rs | use super::behaviors::{TextAction, TextBehavior};
use crate::{api::prelude::*, prelude::*, proc_macros::*, themes::theme_orbtk::*};
// --- KEYS --
pub static STYLE_TEXT_BOX: &str = "text_box";
static ID_CURSOR: &str = "id_cursor";
// --- KEYS --
widget!(
/// The `TextBox` widget represents a single line text input widget.
///
/// * style: `text_box`
TextBox: ActivateHandler,
KeyDownHandler,
TextInputHandler {
/// Sets or shares the text property.
text: String,
/// Sets or shares the water_mark text property.
water_mark: String,
/// Sets or shares the text selection property.
selection: TextSelection,
/// Sets or shares the foreground property.
foreground: Brush,
/// Sets or shares the font size property.
font_size: f64,
/// Sets or shares the font property.
font: String,
/// Sets or shares the background property.
background: Brush,
/// Sets or shares the border radius property.
border_radius: f64,
/// Sets or shares the border thickness property.
border_width: Thickness,
/// Sets or shares the border brush property.
border_brush: Brush,
/// Sets or shares the padding property.
padding: Thickness,
/// Sets or shares the focused property.
focused: bool,
/// Sets or shares ta value that describes if the TextBox should lose focus on activation (enter).
lose_focus_on_activation: bool,
/// Used to request focus from outside. Set to `true` tor request focus.
request_focus: bool,
/// If set to `true` all character will be focused when the widget gets focus. Default is `true`
select_all_on_focus: bool,
/// Indicates if the widget is hovered by the mouse cursor.
hover: bool
}
);
impl Template for TextBox { | .v_align("center")
.h_align("start")
.foreground(id)
.text(id)
.water_mark(id)
.font(id)
.font_size(id)
.localizable(false)
.build(ctx);
let cursor = Cursor::new().id(ID_CURSOR).selection(id).build(ctx);
let text_behavior = TextBehavior::new()
.cursor(cursor.0)
.target(id.0)
.text_block(text_block.0)
.focused(id)
.font(id)
.font_size(id)
.lose_focus_on_activation(id)
.select_all_on_focus(id)
.request_focus(id)
.text(id)
.selection(id)
.build(ctx);
self.name("TextBox")
.style(STYLE_TEXT_BOX)
.background(colors::LYNCH_COLOR)
.border_brush("transparent")
.border_width(0.0)
.border_radius(2.0)
.focused(false)
.font_size(orbtk_fonts::FONT_SIZE_12)
.font("Roboto-Regular")
.foreground(colors::LINK_WATER_COLOR)
.height(32.0)
.lose_focus_on_activation(true)
.min_width(128.0)
.padding(4.0)
.select_all_on_focus(true)
.selection(TextSelection::default())
.text("")
.child(text_behavior)
.child(
Container::new()
.background(id)
.border_radius(id)
.border_width(id)
.border_brush(id)
.padding(id)
.child(
Grid::new()
.clip(true)
.child(cursor)
.child(text_block)
.build(ctx),
)
.build(ctx),
)
.on_changed("text", move |ctx, _| {
ctx.send_message(TextAction::ForceUpdate(false), text_behavior);
})
}
} | fn template(self, id: Entity, ctx: &mut BuildContext) -> Self {
let text_block = TextBlock::new() | random_line_split |
text_box.rs | use super::behaviors::{TextAction, TextBehavior};
use crate::{api::prelude::*, prelude::*, proc_macros::*, themes::theme_orbtk::*};
// --- KEYS --
pub static STYLE_TEXT_BOX: &str = "text_box";
static ID_CURSOR: &str = "id_cursor";
// --- KEYS --
widget!(
/// The `TextBox` widget represents a single line text input widget.
///
/// * style: `text_box`
TextBox: ActivateHandler,
KeyDownHandler,
TextInputHandler {
/// Sets or shares the text property.
text: String,
/// Sets or shares the water_mark text property.
water_mark: String,
/// Sets or shares the text selection property.
selection: TextSelection,
/// Sets or shares the foreground property.
foreground: Brush,
/// Sets or shares the font size property.
font_size: f64,
/// Sets or shares the font property.
font: String,
/// Sets or shares the background property.
background: Brush,
/// Sets or shares the border radius property.
border_radius: f64,
/// Sets or shares the border thickness property.
border_width: Thickness,
/// Sets or shares the border brush property.
border_brush: Brush,
/// Sets or shares the padding property.
padding: Thickness,
/// Sets or shares the focused property.
focused: bool,
/// Sets or shares ta value that describes if the TextBox should lose focus on activation (enter).
lose_focus_on_activation: bool,
/// Used to request focus from outside. Set to `true` tor request focus.
request_focus: bool,
/// If set to `true` all character will be focused when the widget gets focus. Default is `true`
select_all_on_focus: bool,
/// Indicates if the widget is hovered by the mouse cursor.
hover: bool
}
);
impl Template for TextBox {
fn template(self, id: Entity, ctx: &mut BuildContext) -> Self | .font_size(id)
.lose_focus_on_activation(id)
.select_all_on_focus(id)
.request_focus(id)
.text(id)
.selection(id)
.build(ctx);
self.name("TextBox")
.style(STYLE_TEXT_BOX)
.background(colors::LYNCH_COLOR)
.border_brush("transparent")
.border_width(0.0)
.border_radius(2.0)
.focused(false)
.font_size(orbtk_fonts::FONT_SIZE_12)
.font("Roboto-Regular")
.foreground(colors::LINK_WATER_COLOR)
.height(32.0)
.lose_focus_on_activation(true)
.min_width(128.0)
.padding(4.0)
.select_all_on_focus(true)
.selection(TextSelection::default())
.text("")
.child(text_behavior)
.child(
Container::new()
.background(id)
.border_radius(id)
.border_width(id)
.border_brush(id)
.padding(id)
.child(
Grid::new()
.clip(true)
.child(cursor)
.child(text_block)
.build(ctx),
)
.build(ctx),
)
.on_changed("text", move |ctx, _| {
ctx.send_message(TextAction::ForceUpdate(false), text_behavior);
})
}
}
| {
let text_block = TextBlock::new()
.v_align("center")
.h_align("start")
.foreground(id)
.text(id)
.water_mark(id)
.font(id)
.font_size(id)
.localizable(false)
.build(ctx);
let cursor = Cursor::new().id(ID_CURSOR).selection(id).build(ctx);
let text_behavior = TextBehavior::new()
.cursor(cursor.0)
.target(id.0)
.text_block(text_block.0)
.focused(id)
.font(id) | identifier_body |
spawn_task.rs | use super::*;
use crate::ok_or_shutdown;
use crate::state_helper::{pause_on_failure, save_state, LockedState};
impl TaskHandler {
/// See if we can start a new queued task.
pub fn spawn_new(&mut self) {
let cloned_state_mutex = self.state.clone();
let mut state = cloned_state_mutex.lock().unwrap();
// Check whether a new task can be started.
// Spawn tasks until we no longer have free slots available.
while let Some(id) = self.get_next_task_id(&state) {
self.start_process(id, &mut state);
}
}
/// Search and return the next task that can be started.
/// Precondition for a task to be started:
/// - is in Queued state
/// - There are free slots in the task's group
/// - The group is running
/// - has all its dependencies in `Done` state
pub fn get_next_task_id(&mut self, state: &LockedState) -> Option<usize> {
state
.tasks
.iter()
.filter(|(_, task)| task.status == TaskStatus::Queued)
.filter(|(_, task)| {
// Make sure the task is assigned to an existing group.
let group = match state.groups.get(&task.group) {
Some(group) => group,
None => {
error!(
"Got task with unknown group {}. Please report this!",
&task.group
);
return false;
}
};
// Let's check if the group is running. If it isn't, simply return false.
if group.status!= GroupStatus::Running {
return false;
}
// Get the currently running tasks by looking at the actually running processes.
// They're sorted by group, which makes this quite convenient.
let running_tasks = match self.children.0.get(&task.group) {
Some(children) => children.len(),
None => {
error!(
"Got valid group {}, but no worker pool has been initialized. This is a bug!",
&task.group
);
return false
}
};
// Make sure there are free slots in the task's group
running_tasks < group.parallel_tasks
})
.find(|(_, task)| {
// Check whether all dependencies for this task are fulfilled.
task.dependencies
.iter()
.flat_map(|id| state.tasks.get(id))
.all(|task| matches!(task.status, TaskStatus::Done(TaskResult::Success)))
})
.map(|(id, _)| *id)
}
/// Actually spawn a new sub process
/// The output of subprocesses is piped into a seperate file for easier access
pub fn start_process(&mut self, task_id: usize, state: &mut LockedState) | let (stdout_log, stderr_log) = match create_log_file_handles(task_id, &self.pueue_directory)
{
Ok((out, err)) => (out, err),
Err(err) => {
panic!("Failed to create child log files: {err:?}");
}
};
// Get all necessary info for starting the task
let (command, path, group, mut envs) = {
let task = state.tasks.get(&task_id).unwrap();
(
task.command.clone(),
task.path.clone(),
task.group.clone(),
task.envs.clone(),
)
};
// Build the shell command that should be executed.
let mut command = compile_shell_command(&command);
// Determine the worker's id depending on the current group.
// Inject that info into the environment.
let worker_id = self.children.get_next_group_worker(&group);
envs.insert("PUEUE_GROUP".into(), group.clone());
envs.insert("PUEUE_WORKER_ID".into(), worker_id.to_string());
// Spawn the actual subprocess
let spawned_command = command
.current_dir(path)
.stdin(Stdio::piped())
.envs(envs.clone())
.stdout(Stdio::from(stdout_log))
.stderr(Stdio::from(stderr_log))
.spawn();
// Check if the task managed to spawn
let child = match spawned_command {
Ok(child) => child,
Err(err) => {
let error = format!("Failed to spawn child {task_id} with err: {err:?}");
error!("{}", error);
clean_log_handles(task_id, &self.pueue_directory);
// Update all necessary fields on the task.
let group = {
let task = state.tasks.get_mut(&task_id).unwrap();
task.status = TaskStatus::Done(TaskResult::FailedToSpawn(error));
task.start = Some(Local::now());
task.end = Some(Local::now());
self.spawn_callback(task);
task.group.clone()
};
pause_on_failure(state, &group);
ok_or_shutdown!(self, save_state(state));
return;
}
};
// Save the process handle in our self.children datastructure.
self.children.add_child(&group, worker_id, task_id, child);
let task = state.tasks.get_mut(&task_id).unwrap();
task.start = Some(Local::now());
task.status = TaskStatus::Running;
// Overwrite the task's environment variables with the new ones, containing the
// PUEUE_WORKER_ID and PUEUE_GROUP variables.
task.envs = envs;
info!("Started task: {}", task.command);
ok_or_shutdown!(self, save_state(state));
}
}
| {
// Check if the task exists and can actually be spawned. Otherwise do an early return.
match state.tasks.get(&task_id) {
Some(task) => {
if !matches!(
&task.status,
TaskStatus::Stashed { .. } | TaskStatus::Queued | TaskStatus::Paused
) {
info!("Tried to start task with status: {}", task.status);
return;
}
}
None => {
info!("Tried to start non-existing task: {task_id}");
return;
}
};
// Try to get the log file to which the output of the process will be written to.
// Panic if this doesn't work! This is unrecoverable. | identifier_body |
spawn_task.rs | use super::*;
use crate::ok_or_shutdown;
use crate::state_helper::{pause_on_failure, save_state, LockedState};
impl TaskHandler {
/// See if we can start a new queued task.
pub fn | (&mut self) {
let cloned_state_mutex = self.state.clone();
let mut state = cloned_state_mutex.lock().unwrap();
// Check whether a new task can be started.
// Spawn tasks until we no longer have free slots available.
while let Some(id) = self.get_next_task_id(&state) {
self.start_process(id, &mut state);
}
}
/// Search and return the next task that can be started.
/// Precondition for a task to be started:
/// - is in Queued state
/// - There are free slots in the task's group
/// - The group is running
/// - has all its dependencies in `Done` state
pub fn get_next_task_id(&mut self, state: &LockedState) -> Option<usize> {
state
.tasks
.iter()
.filter(|(_, task)| task.status == TaskStatus::Queued)
.filter(|(_, task)| {
// Make sure the task is assigned to an existing group.
let group = match state.groups.get(&task.group) {
Some(group) => group,
None => {
error!(
"Got task with unknown group {}. Please report this!",
&task.group
);
return false;
}
};
// Let's check if the group is running. If it isn't, simply return false.
if group.status!= GroupStatus::Running {
return false;
}
// Get the currently running tasks by looking at the actually running processes.
// They're sorted by group, which makes this quite convenient.
let running_tasks = match self.children.0.get(&task.group) {
Some(children) => children.len(),
None => {
error!(
"Got valid group {}, but no worker pool has been initialized. This is a bug!",
&task.group
);
return false
}
};
// Make sure there are free slots in the task's group
running_tasks < group.parallel_tasks
})
.find(|(_, task)| {
// Check whether all dependencies for this task are fulfilled.
task.dependencies
.iter()
.flat_map(|id| state.tasks.get(id))
.all(|task| matches!(task.status, TaskStatus::Done(TaskResult::Success)))
})
.map(|(id, _)| *id)
}
/// Actually spawn a new sub process
/// The output of subprocesses is piped into a seperate file for easier access
pub fn start_process(&mut self, task_id: usize, state: &mut LockedState) {
// Check if the task exists and can actually be spawned. Otherwise do an early return.
match state.tasks.get(&task_id) {
Some(task) => {
if!matches!(
&task.status,
TaskStatus::Stashed {.. } | TaskStatus::Queued | TaskStatus::Paused
) {
info!("Tried to start task with status: {}", task.status);
return;
}
}
None => {
info!("Tried to start non-existing task: {task_id}");
return;
}
};
// Try to get the log file to which the output of the process will be written to.
// Panic if this doesn't work! This is unrecoverable.
let (stdout_log, stderr_log) = match create_log_file_handles(task_id, &self.pueue_directory)
{
Ok((out, err)) => (out, err),
Err(err) => {
panic!("Failed to create child log files: {err:?}");
}
};
// Get all necessary info for starting the task
let (command, path, group, mut envs) = {
let task = state.tasks.get(&task_id).unwrap();
(
task.command.clone(),
task.path.clone(),
task.group.clone(),
task.envs.clone(),
)
};
// Build the shell command that should be executed.
let mut command = compile_shell_command(&command);
// Determine the worker's id depending on the current group.
// Inject that info into the environment.
let worker_id = self.children.get_next_group_worker(&group);
envs.insert("PUEUE_GROUP".into(), group.clone());
envs.insert("PUEUE_WORKER_ID".into(), worker_id.to_string());
// Spawn the actual subprocess
let spawned_command = command
.current_dir(path)
.stdin(Stdio::piped())
.envs(envs.clone())
.stdout(Stdio::from(stdout_log))
.stderr(Stdio::from(stderr_log))
.spawn();
// Check if the task managed to spawn
let child = match spawned_command {
Ok(child) => child,
Err(err) => {
let error = format!("Failed to spawn child {task_id} with err: {err:?}");
error!("{}", error);
clean_log_handles(task_id, &self.pueue_directory);
// Update all necessary fields on the task.
let group = {
let task = state.tasks.get_mut(&task_id).unwrap();
task.status = TaskStatus::Done(TaskResult::FailedToSpawn(error));
task.start = Some(Local::now());
task.end = Some(Local::now());
self.spawn_callback(task);
task.group.clone()
};
pause_on_failure(state, &group);
ok_or_shutdown!(self, save_state(state));
return;
}
};
// Save the process handle in our self.children datastructure.
self.children.add_child(&group, worker_id, task_id, child);
let task = state.tasks.get_mut(&task_id).unwrap();
task.start = Some(Local::now());
task.status = TaskStatus::Running;
// Overwrite the task's environment variables with the new ones, containing the
// PUEUE_WORKER_ID and PUEUE_GROUP variables.
task.envs = envs;
info!("Started task: {}", task.command);
ok_or_shutdown!(self, save_state(state));
}
}
| spawn_new | identifier_name |
spawn_task.rs | use super::*;
use crate::ok_or_shutdown;
use crate::state_helper::{pause_on_failure, save_state, LockedState};
impl TaskHandler {
/// See if we can start a new queued task.
pub fn spawn_new(&mut self) {
let cloned_state_mutex = self.state.clone();
let mut state = cloned_state_mutex.lock().unwrap();
// Check whether a new task can be started.
// Spawn tasks until we no longer have free slots available.
while let Some(id) = self.get_next_task_id(&state) {
self.start_process(id, &mut state);
}
}
/// Search and return the next task that can be started.
/// Precondition for a task to be started:
/// - is in Queued state
/// - There are free slots in the task's group
/// - The group is running
/// - has all its dependencies in `Done` state
pub fn get_next_task_id(&mut self, state: &LockedState) -> Option<usize> {
state
.tasks
.iter()
.filter(|(_, task)| task.status == TaskStatus::Queued)
.filter(|(_, task)| {
// Make sure the task is assigned to an existing group.
let group = match state.groups.get(&task.group) {
Some(group) => group,
None => {
error!(
"Got task with unknown group {}. Please report this!",
&task.group
);
return false; | return false;
}
// Get the currently running tasks by looking at the actually running processes.
// They're sorted by group, which makes this quite convenient.
let running_tasks = match self.children.0.get(&task.group) {
Some(children) => children.len(),
None => {
error!(
"Got valid group {}, but no worker pool has been initialized. This is a bug!",
&task.group
);
return false
}
};
// Make sure there are free slots in the task's group
running_tasks < group.parallel_tasks
})
.find(|(_, task)| {
// Check whether all dependencies for this task are fulfilled.
task.dependencies
.iter()
.flat_map(|id| state.tasks.get(id))
.all(|task| matches!(task.status, TaskStatus::Done(TaskResult::Success)))
})
.map(|(id, _)| *id)
}
/// Actually spawn a new sub process
/// The output of subprocesses is piped into a seperate file for easier access
pub fn start_process(&mut self, task_id: usize, state: &mut LockedState) {
// Check if the task exists and can actually be spawned. Otherwise do an early return.
match state.tasks.get(&task_id) {
Some(task) => {
if!matches!(
&task.status,
TaskStatus::Stashed {.. } | TaskStatus::Queued | TaskStatus::Paused
) {
info!("Tried to start task with status: {}", task.status);
return;
}
}
None => {
info!("Tried to start non-existing task: {task_id}");
return;
}
};
// Try to get the log file to which the output of the process will be written to.
// Panic if this doesn't work! This is unrecoverable.
let (stdout_log, stderr_log) = match create_log_file_handles(task_id, &self.pueue_directory)
{
Ok((out, err)) => (out, err),
Err(err) => {
panic!("Failed to create child log files: {err:?}");
}
};
// Get all necessary info for starting the task
let (command, path, group, mut envs) = {
let task = state.tasks.get(&task_id).unwrap();
(
task.command.clone(),
task.path.clone(),
task.group.clone(),
task.envs.clone(),
)
};
// Build the shell command that should be executed.
let mut command = compile_shell_command(&command);
// Determine the worker's id depending on the current group.
// Inject that info into the environment.
let worker_id = self.children.get_next_group_worker(&group);
envs.insert("PUEUE_GROUP".into(), group.clone());
envs.insert("PUEUE_WORKER_ID".into(), worker_id.to_string());
// Spawn the actual subprocess
let spawned_command = command
.current_dir(path)
.stdin(Stdio::piped())
.envs(envs.clone())
.stdout(Stdio::from(stdout_log))
.stderr(Stdio::from(stderr_log))
.spawn();
// Check if the task managed to spawn
let child = match spawned_command {
Ok(child) => child,
Err(err) => {
let error = format!("Failed to spawn child {task_id} with err: {err:?}");
error!("{}", error);
clean_log_handles(task_id, &self.pueue_directory);
// Update all necessary fields on the task.
let group = {
let task = state.tasks.get_mut(&task_id).unwrap();
task.status = TaskStatus::Done(TaskResult::FailedToSpawn(error));
task.start = Some(Local::now());
task.end = Some(Local::now());
self.spawn_callback(task);
task.group.clone()
};
pause_on_failure(state, &group);
ok_or_shutdown!(self, save_state(state));
return;
}
};
// Save the process handle in our self.children datastructure.
self.children.add_child(&group, worker_id, task_id, child);
let task = state.tasks.get_mut(&task_id).unwrap();
task.start = Some(Local::now());
task.status = TaskStatus::Running;
// Overwrite the task's environment variables with the new ones, containing the
// PUEUE_WORKER_ID and PUEUE_GROUP variables.
task.envs = envs;
info!("Started task: {}", task.command);
ok_or_shutdown!(self, save_state(state));
}
} | }
};
// Let's check if the group is running. If it isn't, simply return false.
if group.status != GroupStatus::Running { | random_line_split |
const-contents.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #570
static lsl : int = 1 << 2;
static add : int = 1 + 2;
static addf : f64 = 1.0 + 2.0;
static not : int =!0;
static notb : bool =!true;
static neg : int = -(1);
pub fn | () {
assert_eq!(lsl, 4);
assert_eq!(add, 3);
assert_eq!(addf, 3.0);
assert_eq!(not, -1);
assert_eq!(notb, false);
assert_eq!(neg, -1);
}
| main | identifier_name |
const-contents.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #570
static lsl : int = 1 << 2;
static add : int = 1 + 2;
static addf : f64 = 1.0 + 2.0;
static not : int =!0;
static notb : bool =!true;
static neg : int = -(1);
pub fn main() | {
assert_eq!(lsl, 4);
assert_eq!(add, 3);
assert_eq!(addf, 3.0);
assert_eq!(not, -1);
assert_eq!(notb, false);
assert_eq!(neg, -1);
} | identifier_body |
|
const-contents.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #570
static lsl : int = 1 << 2;
static add : int = 1 + 2;
static addf : f64 = 1.0 + 2.0;
static not : int =!0;
static notb : bool =!true;
static neg : int = -(1);
pub fn main() {
assert_eq!(lsl, 4);
assert_eq!(add, 3);
assert_eq!(addf, 3.0);
assert_eq!(not, -1);
assert_eq!(notb, false);
assert_eq!(neg, -1);
} | random_line_split |
|
common.rs | use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
use std::io::{self, Read, Write};
pub enum | {
Connect(u32),
Connected(u32),
Data(u32, Vec<u8>),
Disconnect(u32),
Disconnected(u32), // Really want FIN ACK?
}
/*
impl SimpleFwdOp {
pub fn write_to<A: Write + Sized>(&self, os: &mut A) -> io::Result<()> {
use SimpleFwdOp::*;
match self {
&Connect(id) => {
try!(os.write_byte(1));
try!(os.write_all(1));
}
}
}
}
*/
pub fn read_exact<A: Read + Sized>(stream: &mut A, buf: &mut [u8]) -> io::Result<()> {
stream.take(buf.len() as u64)
.read(buf)
.map(|_| ())
}
pub fn read_frame<A: Read + Sized>(stream: &mut A) -> io::Result<Vec<u8>> {
let length = try!(stream.read_u32::<BigEndian>());
let mut buf = vec![0; length as usize];
try!(read_exact(stream, &mut buf));
Ok(buf)
}
pub fn write_frame<A: Write + Sized>(stream: &mut A, buf: &[u8]) -> io::Result<()> {
try!(stream.write_u32::<BigEndian>(buf.len() as u32));
try!(stream.write_all(buf));
stream.flush()
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
fn check_write_read(bs: &[u8]) {
let mut buf = vec![];
write_frame(&mut buf, &bs).unwrap();
let bs2 = read_frame(&mut Cursor::new(buf)).unwrap();
assert_eq!(bs2, bs);
}
#[test]
fn test_write_read() {
check_write_read(&[]);
check_write_read(b"a");
check_write_read(b"asdf");
check_write_read(b"asdf5");
}
}
| SimpleFwdOp | identifier_name |
common.rs | use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
use std::io::{self, Read, Write};
pub enum SimpleFwdOp {
Connect(u32),
Connected(u32),
Data(u32, Vec<u8>),
Disconnect(u32),
Disconnected(u32), // Really want FIN ACK?
}
/*
impl SimpleFwdOp {
pub fn write_to<A: Write + Sized>(&self, os: &mut A) -> io::Result<()> {
use SimpleFwdOp::*;
match self {
&Connect(id) => {
try!(os.write_byte(1));
try!(os.write_all(1));
}
}
}
}
*/
pub fn read_exact<A: Read + Sized>(stream: &mut A, buf: &mut [u8]) -> io::Result<()> {
stream.take(buf.len() as u64)
.read(buf)
.map(|_| ())
}
pub fn read_frame<A: Read + Sized>(stream: &mut A) -> io::Result<Vec<u8>> |
pub fn write_frame<A: Write + Sized>(stream: &mut A, buf: &[u8]) -> io::Result<()> {
try!(stream.write_u32::<BigEndian>(buf.len() as u32));
try!(stream.write_all(buf));
stream.flush()
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
fn check_write_read(bs: &[u8]) {
let mut buf = vec![];
write_frame(&mut buf, &bs).unwrap();
let bs2 = read_frame(&mut Cursor::new(buf)).unwrap();
assert_eq!(bs2, bs);
}
#[test]
fn test_write_read() {
check_write_read(&[]);
check_write_read(b"a");
check_write_read(b"asdf");
check_write_read(b"asdf5");
}
}
| {
let length = try!(stream.read_u32::<BigEndian>());
let mut buf = vec![0; length as usize];
try!(read_exact(stream, &mut buf));
Ok(buf)
} | identifier_body |
common.rs | use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
use std::io::{self, Read, Write};
pub enum SimpleFwdOp {
Connect(u32),
Connected(u32),
Data(u32, Vec<u8>),
Disconnect(u32),
Disconnected(u32), // Really want FIN ACK?
}
/*
impl SimpleFwdOp {
pub fn write_to<A: Write + Sized>(&self, os: &mut A) -> io::Result<()> {
use SimpleFwdOp::*;
match self {
&Connect(id) => {
try!(os.write_byte(1));
try!(os.write_all(1));
}
}
}
}
*/
pub fn read_exact<A: Read + Sized>(stream: &mut A, buf: &mut [u8]) -> io::Result<()> {
stream.take(buf.len() as u64)
.read(buf)
.map(|_| ())
}
pub fn read_frame<A: Read + Sized>(stream: &mut A) -> io::Result<Vec<u8>> {
let length = try!(stream.read_u32::<BigEndian>());
let mut buf = vec![0; length as usize];
try!(read_exact(stream, &mut buf));
Ok(buf)
}
pub fn write_frame<A: Write + Sized>(stream: &mut A, buf: &[u8]) -> io::Result<()> {
try!(stream.write_u32::<BigEndian>(buf.len() as u32));
try!(stream.write_all(buf));
stream.flush()
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
fn check_write_read(bs: &[u8]) {
let mut buf = vec![];
write_frame(&mut buf, &bs).unwrap();
let bs2 = read_frame(&mut Cursor::new(buf)).unwrap();
assert_eq!(bs2, bs);
}
#[test]
fn test_write_read() {
check_write_read(&[]);
check_write_read(b"a");
check_write_read(b"asdf");
check_write_read(b"asdf5"); | }
} | random_line_split |
|
vec_delete_left.rs | use malachite_base::vecs::vec_delete_left;
use malachite_base_test_util::bench::bucketers::pair_1_vec_len_bucketer;
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::generators::unsigned_vec_unsigned_pair_gen_var_1;
use malachite_base_test_util::runner::Runner;
pub(crate) fn register(runner: &mut Runner) {
register_demo!(runner, demo_vec_delete_left);
register_bench!(runner, benchmark_vec_delete_left);
}
fn demo_vec_delete_left(gm: GenMode, config: GenConfig, limit: usize) {
for (mut xs, amount) in unsigned_vec_unsigned_pair_gen_var_1::<u8>()
.get(gm, &config)
.take(limit)
{
let old_xs = xs.clone();
vec_delete_left(&mut xs, amount);
println!(
"xs := {:?}; vec_delete_left(&mut xs, {}); xs = {:?}",
old_xs, amount, xs
);
}
}
fn benchmark_vec_delete_left(gm: GenMode, config: GenConfig, limit: usize, file_name: &str) | {
run_benchmark(
"vec_delete_left(&mut [T], usize)",
BenchmarkType::Single,
unsigned_vec_unsigned_pair_gen_var_1::<u8>().get(gm, &config),
gm.name(),
limit,
file_name,
&pair_1_vec_len_bucketer("xs"),
&mut [("Malachite", &mut |(mut xs, amount)| {
vec_delete_left(&mut xs, amount)
})],
);
} | identifier_body |
|
vec_delete_left.rs | use malachite_base::vecs::vec_delete_left;
use malachite_base_test_util::bench::bucketers::pair_1_vec_len_bucketer;
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::generators::unsigned_vec_unsigned_pair_gen_var_1;
use malachite_base_test_util::runner::Runner;
pub(crate) fn register(runner: &mut Runner) {
register_demo!(runner, demo_vec_delete_left);
register_bench!(runner, benchmark_vec_delete_left);
}
fn demo_vec_delete_left(gm: GenMode, config: GenConfig, limit: usize) { | vec_delete_left(&mut xs, amount);
println!(
"xs := {:?}; vec_delete_left(&mut xs, {}); xs = {:?}",
old_xs, amount, xs
);
}
}
fn benchmark_vec_delete_left(gm: GenMode, config: GenConfig, limit: usize, file_name: &str) {
run_benchmark(
"vec_delete_left(&mut [T], usize)",
BenchmarkType::Single,
unsigned_vec_unsigned_pair_gen_var_1::<u8>().get(gm, &config),
gm.name(),
limit,
file_name,
&pair_1_vec_len_bucketer("xs"),
&mut [("Malachite", &mut |(mut xs, amount)| {
vec_delete_left(&mut xs, amount)
})],
);
} | for (mut xs, amount) in unsigned_vec_unsigned_pair_gen_var_1::<u8>()
.get(gm, &config)
.take(limit)
{
let old_xs = xs.clone(); | random_line_split |
vec_delete_left.rs | use malachite_base::vecs::vec_delete_left;
use malachite_base_test_util::bench::bucketers::pair_1_vec_len_bucketer;
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::generators::unsigned_vec_unsigned_pair_gen_var_1;
use malachite_base_test_util::runner::Runner;
pub(crate) fn register(runner: &mut Runner) {
register_demo!(runner, demo_vec_delete_left);
register_bench!(runner, benchmark_vec_delete_left);
}
fn demo_vec_delete_left(gm: GenMode, config: GenConfig, limit: usize) {
for (mut xs, amount) in unsigned_vec_unsigned_pair_gen_var_1::<u8>()
.get(gm, &config)
.take(limit)
{
let old_xs = xs.clone();
vec_delete_left(&mut xs, amount);
println!(
"xs := {:?}; vec_delete_left(&mut xs, {}); xs = {:?}",
old_xs, amount, xs
);
}
}
fn | (gm: GenMode, config: GenConfig, limit: usize, file_name: &str) {
run_benchmark(
"vec_delete_left(&mut [T], usize)",
BenchmarkType::Single,
unsigned_vec_unsigned_pair_gen_var_1::<u8>().get(gm, &config),
gm.name(),
limit,
file_name,
&pair_1_vec_len_bucketer("xs"),
&mut [("Malachite", &mut |(mut xs, amount)| {
vec_delete_left(&mut xs, amount)
})],
);
}
| benchmark_vec_delete_left | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(plugin)]
#![cfg_attr(test, feature(core_intrinsics))]
#![plugin(plugins)]
extern crate app_units;
extern crate cssparser;
extern crate euclid;
extern crate selectors;
#[macro_use(atom, ns)] extern crate string_cache;
extern crate style;
extern crate style_traits;
extern crate url;
extern crate util;
#[cfg(test)] mod stylesheets;
#[cfg(test)] mod media_queries;
#[cfg(test)] mod viewport;
#[cfg(test)] mod writing_modes {
use style::properties::{INITIAL_VALUES, get_writing_mode};
use util::logical_geometry::WritingMode;
#[test]
fn initial_writing_mode_is_empty() |
}
| {
assert_eq!(get_writing_mode(INITIAL_VALUES.get_inheritedbox()), WritingMode::empty())
} | identifier_body |
lib.rs | #![cfg_attr(test, feature(core_intrinsics))]
#![plugin(plugins)]
extern crate app_units;
extern crate cssparser;
extern crate euclid;
extern crate selectors;
#[macro_use(atom, ns)] extern crate string_cache;
extern crate style;
extern crate style_traits;
extern crate url;
extern crate util;
#[cfg(test)] mod stylesheets;
#[cfg(test)] mod media_queries;
#[cfg(test)] mod viewport;
#[cfg(test)] mod writing_modes {
use style::properties::{INITIAL_VALUES, get_writing_mode};
use util::logical_geometry::WritingMode;
#[test]
fn initial_writing_mode_is_empty() {
assert_eq!(get_writing_mode(INITIAL_VALUES.get_inheritedbox()), WritingMode::empty())
}
} | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(plugin)] | random_line_split |
|
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(plugin)]
#![cfg_attr(test, feature(core_intrinsics))]
#![plugin(plugins)]
extern crate app_units;
extern crate cssparser;
extern crate euclid;
extern crate selectors;
#[macro_use(atom, ns)] extern crate string_cache;
extern crate style;
extern crate style_traits;
extern crate url;
extern crate util;
#[cfg(test)] mod stylesheets;
#[cfg(test)] mod media_queries;
#[cfg(test)] mod viewport;
#[cfg(test)] mod writing_modes {
use style::properties::{INITIAL_VALUES, get_writing_mode};
use util::logical_geometry::WritingMode;
#[test]
fn | () {
assert_eq!(get_writing_mode(INITIAL_VALUES.get_inheritedbox()), WritingMode::empty())
}
}
| initial_writing_mode_is_empty | identifier_name |
issue-33537.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
const fn foo() -> *const i8 {
b"foo" as *const _ as *const i8
}
const fn bar() -> i32 {
*&{(1, 2, 3).1}
}
fn main() {
assert_eq!(foo(), b"foo" as *const _ as *const i8); | } | assert_eq!(bar(), 2); | random_line_split |
issue-33537.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
const fn foo() -> *const i8 {
b"foo" as *const _ as *const i8
}
const fn | () -> i32 {
*&{(1, 2, 3).1}
}
fn main() {
assert_eq!(foo(), b"foo" as *const _ as *const i8);
assert_eq!(bar(), 2);
}
| bar | identifier_name |
issue-33537.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
const fn foo() -> *const i8 {
b"foo" as *const _ as *const i8
}
const fn bar() -> i32 |
fn main() {
assert_eq!(foo(), b"foo" as *const _ as *const i8);
assert_eq!(bar(), 2);
}
| {
*&{(1, 2, 3).1}
} | identifier_body |
uart.rs | use core::fmt;
use core::str::StrExt;
use core::result::Result;
use hw::HW;
pub trait Uart<W : HW> {
fn put(&self, &mut W, ch : u8);
}
pub trait UartWriter : fmt::Write { }
pub struct | ;
impl UartWriter for DummyUartWriter { }
impl fmt::Write for DummyUartWriter {
fn write_str(&mut self, _: &str) -> fmt::Result {
Result::Ok(())
}
}
pub struct BlockingUartWriter<H :'static+HW> {
uart : &'static Uart<H>,
hw : &'static mut H,
}
impl<H : HW> UartWriter for BlockingUartWriter<H> { }
impl<H> BlockingUartWriter<H>
where H : HW {
pub fn new(hw : &'static mut H, uart : &'static Uart<H>) -> BlockingUartWriter<H> {
BlockingUartWriter { uart: uart, hw: hw }
}
}
impl<H> fmt::Write for BlockingUartWriter<H>
where H : HW {
fn write_str(&mut self, s: &str) -> fmt::Result {
for ch in s.bytes() {
self.uart.put(self.hw, ch);
}
Result::Ok(())
}
}
| DummyUartWriter | identifier_name |
uart.rs | use core::fmt;
use core::str::StrExt;
use core::result::Result;
|
pub trait UartWriter : fmt::Write { }
pub struct DummyUartWriter;
impl UartWriter for DummyUartWriter { }
impl fmt::Write for DummyUartWriter {
fn write_str(&mut self, _: &str) -> fmt::Result {
Result::Ok(())
}
}
pub struct BlockingUartWriter<H :'static+HW> {
uart : &'static Uart<H>,
hw : &'static mut H,
}
impl<H : HW> UartWriter for BlockingUartWriter<H> { }
impl<H> BlockingUartWriter<H>
where H : HW {
pub fn new(hw : &'static mut H, uart : &'static Uart<H>) -> BlockingUartWriter<H> {
BlockingUartWriter { uart: uart, hw: hw }
}
}
impl<H> fmt::Write for BlockingUartWriter<H>
where H : HW {
fn write_str(&mut self, s: &str) -> fmt::Result {
for ch in s.bytes() {
self.uart.put(self.hw, ch);
}
Result::Ok(())
}
} | use hw::HW;
pub trait Uart<W : HW> {
fn put(&self, &mut W, ch : u8);
} | random_line_split |
uart.rs | use core::fmt;
use core::str::StrExt;
use core::result::Result;
use hw::HW;
pub trait Uart<W : HW> {
fn put(&self, &mut W, ch : u8);
}
pub trait UartWriter : fmt::Write { }
pub struct DummyUartWriter;
impl UartWriter for DummyUartWriter { }
impl fmt::Write for DummyUartWriter {
fn write_str(&mut self, _: &str) -> fmt::Result {
Result::Ok(())
}
}
pub struct BlockingUartWriter<H :'static+HW> {
uart : &'static Uart<H>,
hw : &'static mut H,
}
impl<H : HW> UartWriter for BlockingUartWriter<H> { }
impl<H> BlockingUartWriter<H>
where H : HW {
pub fn new(hw : &'static mut H, uart : &'static Uart<H>) -> BlockingUartWriter<H> |
}
impl<H> fmt::Write for BlockingUartWriter<H>
where H : HW {
fn write_str(&mut self, s: &str) -> fmt::Result {
for ch in s.bytes() {
self.uart.put(self.hw, ch);
}
Result::Ok(())
}
}
| {
BlockingUartWriter { uart: uart, hw: hw }
} | identifier_body |
connecting.rs | extern crate ftp;
use std::str;
use std::io::Cursor;
use ftp::FtpStream;
fn | () {
let mut ftp_stream = match FtpStream::connect("127.0.0.1", 21) {
Ok(s) => s,
Err(e) => panic!("{}", e)
};
match ftp_stream.login("username", "password") {
Ok(_) => (),
Err(e) => panic!("{}", e)
}
match ftp_stream.current_dir() {
Ok(dir) => println!("{}", dir),
Err(e) => panic!("{}", e)
}
match ftp_stream.change_dir("test_data") {
Ok(_) => (),
Err(e) => panic!("{}", e)
}
//An easy way to retreive a file
let remote_file = match ftp_stream.simple_retr("ftpext-charter.txt") {
Ok(file) => file,
Err(e) => panic!("{}", e)
};
match str::from_utf8(&remote_file.into_inner()) {
Ok(s) => print!("{}", s),
Err(e) => panic!("Error reading file data: {}", e)
};
//Store a file
let file_data = format!("Some awesome file data man!!");
let reader: &mut Cursor<Vec<u8>> = &mut Cursor::new(file_data.into_bytes());
match ftp_stream.stor("my_random_file.txt", reader) {
Ok(_) => (),
Err(e) => panic!("{}", e)
}
let _ = ftp_stream.quit();
}
| main | identifier_name |
connecting.rs | extern crate ftp;
use std::str;
use std::io::Cursor;
use ftp::FtpStream;
fn main() |
//An easy way to retreive a file
let remote_file = match ftp_stream.simple_retr("ftpext-charter.txt") {
Ok(file) => file,
Err(e) => panic!("{}", e)
};
match str::from_utf8(&remote_file.into_inner()) {
Ok(s) => print!("{}", s),
Err(e) => panic!("Error reading file data: {}", e)
};
//Store a file
let file_data = format!("Some awesome file data man!!");
let reader: &mut Cursor<Vec<u8>> = &mut Cursor::new(file_data.into_bytes());
match ftp_stream.stor("my_random_file.txt", reader) {
Ok(_) => (),
Err(e) => panic!("{}", e)
}
let _ = ftp_stream.quit();
}
| {
let mut ftp_stream = match FtpStream::connect("127.0.0.1", 21) {
Ok(s) => s,
Err(e) => panic!("{}", e)
};
match ftp_stream.login("username", "password") {
Ok(_) => (),
Err(e) => panic!("{}", e)
}
match ftp_stream.current_dir() {
Ok(dir) => println!("{}", dir),
Err(e) => panic!("{}", e)
}
match ftp_stream.change_dir("test_data") {
Ok(_) => (),
Err(e) => panic!("{}", e)
} | identifier_body |
connecting.rs | extern crate ftp;
use std::str;
use std::io::Cursor;
use ftp::FtpStream;
fn main() {
let mut ftp_stream = match FtpStream::connect("127.0.0.1", 21) {
Ok(s) => s,
Err(e) => panic!("{}", e)
};
match ftp_stream.login("username", "password") {
Ok(_) => (),
Err(e) => panic!("{}", e)
}
match ftp_stream.current_dir() {
Ok(dir) => println!("{}", dir),
Err(e) => panic!("{}", e)
}
match ftp_stream.change_dir("test_data") {
Ok(_) => (),
Err(e) => panic!("{}", e)
}
//An easy way to retreive a file
let remote_file = match ftp_stream.simple_retr("ftpext-charter.txt") {
Ok(file) => file,
Err(e) => panic!("{}", e)
};
match str::from_utf8(&remote_file.into_inner()) {
Ok(s) => print!("{}", s),
Err(e) => panic!("Error reading file data: {}", e)
};
//Store a file
let file_data = format!("Some awesome file data man!!");
let reader: &mut Cursor<Vec<u8>> = &mut Cursor::new(file_data.into_bytes());
match ftp_stream.stor("my_random_file.txt", reader) {
Ok(_) => (),
Err(e) => panic!("{}", e) | let _ = ftp_stream.quit();
} | }
| random_line_split |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use uucore::utmpx::*;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) | }
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.join(" "));
}
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
}
| {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user); | identifier_body |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use uucore::utmpx::*;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.join(" "));
}
}
#[allow(dead_code)]
fn | () {
std::process::exit(uumain(std::env::args().collect()));
}
| main | identifier_name |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use uucore::utmpx::*;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") |
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.join(" "));
}
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
}
| {
println!("{} {}", NAME, VERSION);
return 0;
} | conditional_block |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
| extern crate libc;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use uucore::utmpx::*;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.join(" "));
}
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
} | extern crate getopts; | random_line_split |
cmd_isready_test.rs | // Raven is a high performance UCI chess engine
// Copyright (C) 2015-2015 Nam Pham
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use super::super::types::*;
#[test]
fn test_create_good_isready_command() {
let r = Command::parse("isready");
assert!(r.is_ok());
let cmd = r.ok().unwrap();
assert!(cmd == Command::ISREADY);
}
#[test]
fn test_create_good_isready_command_with_spaces() {
let r = Command::parse(" isready ");
assert!(r.is_ok());
let cmd = r.ok().unwrap();
assert!(cmd == Command::ISREADY);
}
#[test]
fn test_create_bad_isready_command() {
let r = Command::parse("nonexistence");
assert!(r.is_err());
}
#[test]
fn | () {
let r = Command::parse("isready param1 param2");
assert!(r.is_err());
}
| test_create_good_isready_command_but_with_extra_params | identifier_name |
cmd_isready_test.rs | // Raven is a high performance UCI chess engine
// Copyright (C) 2015-2015 Nam Pham
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use super::super::types::*;
#[test]
fn test_create_good_isready_command() {
let r = Command::parse("isready");
assert!(r.is_ok());
let cmd = r.ok().unwrap();
assert!(cmd == Command::ISREADY);
}
#[test]
fn test_create_good_isready_command_with_spaces() {
let r = Command::parse(" isready ");
assert!(r.is_ok());
let cmd = r.ok().unwrap();
assert!(cmd == Command::ISREADY);
}
#[test]
fn test_create_bad_isready_command() |
#[test]
fn test_create_good_isready_command_but_with_extra_params() {
let r = Command::parse("isready param1 param2");
assert!(r.is_err());
}
| {
let r = Command::parse("nonexistence");
assert!(r.is_err());
} | identifier_body |
cmd_isready_test.rs | // Raven is a high performance UCI chess engine
// Copyright (C) 2015-2015 Nam Pham
// | //
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use super::super::types::*;
#[test]
fn test_create_good_isready_command() {
let r = Command::parse("isready");
assert!(r.is_ok());
let cmd = r.ok().unwrap();
assert!(cmd == Command::ISREADY);
}
#[test]
fn test_create_good_isready_command_with_spaces() {
let r = Command::parse(" isready ");
assert!(r.is_ok());
let cmd = r.ok().unwrap();
assert!(cmd == Command::ISREADY);
}
#[test]
fn test_create_bad_isready_command() {
let r = Command::parse("nonexistence");
assert!(r.is_err());
}
#[test]
fn test_create_good_isready_command_but_with_extra_params() {
let r = Command::parse("isready param1 param2");
assert!(r.is_err());
} | // This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. | random_line_split |
single-line-if-else.rs | // rustfmt-single_line_if_else_max_width: 100
// Format if-else expressions on a single line, when possible.
| fn main() {
let a = if 1 > 2 {
unreachable!()
} else {
10
};
let a = if x { 1 } else if y { 2 } else { 3 };
let b = if cond() {
5
} else {
// Brief comment.
10
};
let c = if cond() {
statement();
5
} else {
10
};
let d = if let Some(val) = turbo
{ "cool" } else {
"beans" };
if cond() { statement(); } else { other_statement(); }
if true {
do_something()
}
let x = if veeeeeeeeery_loooooong_condition() { aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa } else { bbbbbbbbbb };
let x = if veeeeeeeeery_loooooong_condition() { aaaaaaaaaaaaaaaaaaaaaaaaa } else {
bbbbbbbbbb };
funk(if test() {
1
} else {
2
},
arg2);
} | random_line_split |
|
single-line-if-else.rs | // rustfmt-single_line_if_else_max_width: 100
// Format if-else expressions on a single line, when possible.
fn main() | } else {
10
};
let d = if let Some(val) = turbo
{ "cool" } else {
"beans" };
if cond() { statement(); } else { other_statement(); }
if true {
do_something()
}
let x = if veeeeeeeeery_loooooong_condition() { aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa } else { bbbbbbbbbb };
let x = if veeeeeeeeery_loooooong_condition() { aaaaaaaaaaaaaaaaaaaaaaaaa } else {
bbbbbbbbbb };
funk(if test() {
1
} else {
2
},
arg2);
}
| {
let a = if 1 > 2 {
unreachable!()
} else {
10
};
let a = if x { 1 } else if y { 2 } else { 3 };
let b = if cond() {
5
} else {
// Brief comment.
10
};
let c = if cond() {
statement();
5 | identifier_body |
single-line-if-else.rs | // rustfmt-single_line_if_else_max_width: 100
// Format if-else expressions on a single line, when possible.
fn | () {
let a = if 1 > 2 {
unreachable!()
} else {
10
};
let a = if x { 1 } else if y { 2 } else { 3 };
let b = if cond() {
5
} else {
// Brief comment.
10
};
let c = if cond() {
statement();
5
} else {
10
};
let d = if let Some(val) = turbo
{ "cool" } else {
"beans" };
if cond() { statement(); } else { other_statement(); }
if true {
do_something()
}
let x = if veeeeeeeeery_loooooong_condition() { aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa } else { bbbbbbbbbb };
let x = if veeeeeeeeery_loooooong_condition() { aaaaaaaaaaaaaaaaaaaaaaaaa } else {
bbbbbbbbbb };
funk(if test() {
1
} else {
2
},
arg2);
}
| main | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.