file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes: random_line_split, identifier_body, identifier_name, conditional_block)
---|---|---|---|---|
build.rs | extern crate rustc_version;
extern crate rusoto_codegen;
extern crate rayon;
use std::env;
use std::path::Path;
use std::io::Write;
use std::fs::File;
use rusoto_codegen::{Service, generate};
use rayon::prelude::*;
/// Parses and generates variables used to construct a User-Agent.
///
/// This is used to create a User-Agent header string resembling
/// `rusoto/x.y.z rust/x.y.z <os>`.
fn generate_user_agent_vars(output_path: &Path) {
let rust_version = rustc_version::version();
let mut f = File::create(&output_path.join("user_agent_vars.rs"))
.expect("Could not create user agent file");
f.write_all(format!("static RUST_VERSION: &'static str = \"{}\";", rust_version).as_bytes())
.expect("Unable to write user agent");
}
/*
gamelift/2015-10-01/service-2.json: "protocol":"json"
support/2013-04-15/service-2.json: "protocol":"json"
*/
// expand to use cfg!() so codegen only gets run for services
// in the features list
macro_rules! services {
( $( [$name:expr, $date:expr] ),* ) => {
{
let mut services = Vec::new();
$(
if cfg!(feature = $name) {
services.push(Service::new($name, $date));
}
)*
services
}
}
}
fn main() {
let out_dir = env::var_os("OUT_DIR").expect("OUT_DIR not specified");
let out_path = Path::new(&out_dir).to_owned();
let services = services! {
["acm", "2015-12-08"],
["autoscaling", "2011-01-01"],
["cloudformation", "2010-05-15"],
["cloudfront", "2016-11-25"],
["cloudhsm", "2014-05-30"],
["cloudsearch", "2013-01-01"],
["cloudtrail", "2013-11-01"],
["cloudwatch", "2010-08-01"],
["codecommit", "2015-04-13"],
["codedeploy", "2014-10-06"],
["codepipeline", "2015-07-09"],
["cognito-identity", "2014-06-30"],
["config", "2014-11-12"],
["datapipeline", "2012-10-29"],
["devicefarm", "2015-06-23"],
["directconnect", "2012-10-25"],
["ds", "2015-04-16"],
["dynamodb", "2012-08-10"],
["dynamodbstreams", "2012-08-10"],
["ec2", "2016-11-15"],
["ecr", "2015-09-21"],
["ecs", "2014-11-13"],
["elasticache", "2015-02-02"],
["elasticbeanstalk", "2010-12-01"],
["elastictranscoder", "2012-09-25"],
["elb", "2012-06-01"],
["elbv2", "2015-12-01"],
["emr", "2009-03-31"],
["events", "2015-10-07"],
["firehose", "2015-08-04"],
["iam", "2010-05-08"],
["importexport", "2010-06-01"],
["inspector", "2016-02-16"],
["iot", "2015-05-28"],
["kinesis", "2013-12-02"],
["kms", "2014-11-01"],
["lambda", "2015-03-31"],
["logs", "2014-03-28"],
["machinelearning", "2014-12-12"],
["marketplacecommerceanalytics", "2015-07-01"],
["opsworks", "2013-02-18"],
["redshift", "2012-12-01"],
["rds", "2014-10-31"],
["route53", "2013-04-01"],
["route53domains", "2014-05-15"], | ["sqs", "2012-11-05"],
["ssm", "2014-11-06"],
["storagegateway", "2013-06-30"],
["sts", "2011-06-15"],
["swf", "2012-01-25"],
["waf", "2015-08-24"],
["workspaces", "2015-04-08"]
};
let count: usize = services.into_par_iter().map(|service| generate(service, &out_path)).count();
println!("\nGenerated {:?} services.\n", count);
generate_user_agent_vars(&out_path);
let codegen_dir = Path::new("codegen");
// avoid unnecessary recompiles when used as a crates.io dependency
if codegen_dir.exists() {
println!("cargo:rerun-if-changed=codegen");
}
} | ["s3", "2006-03-01"],
["sdb", "2009-04-15"],
["sns", "2010-03-31"], | random_line_split |
build.rs | extern crate rustc_version;
extern crate rusoto_codegen;
extern crate rayon;
use std::env;
use std::path::Path;
use std::io::Write;
use std::fs::File;
use rusoto_codegen::{Service, generate};
use rayon::prelude::*;
/// Parses and generates variables used to construct a User-Agent.
///
/// This is used to create a User-Agent header string resembling
/// `rusoto/x.y.z rust/x.y.z <os>`.
fn generate_user_agent_vars(output_path: &Path) |
/*
gamelift/2015-10-01/service-2.json: "protocol":"json"
support/2013-04-15/service-2.json: "protocol":"json"
*/
// expand to use cfg!() so codegen only gets run for services
// in the features list
macro_rules! services {
( $( [$name:expr, $date:expr] ),* ) => {
{
let mut services = Vec::new();
$(
if cfg!(feature = $name) {
services.push(Service::new($name, $date));
}
)*
services
}
}
}
fn main() {
let out_dir = env::var_os("OUT_DIR").expect("OUT_DIR not specified");
let out_path = Path::new(&out_dir).to_owned();
let services = services! {
["acm", "2015-12-08"],
["autoscaling", "2011-01-01"],
["cloudformation", "2010-05-15"],
["cloudfront", "2016-11-25"],
["cloudhsm", "2014-05-30"],
["cloudsearch", "2013-01-01"],
["cloudtrail", "2013-11-01"],
["cloudwatch", "2010-08-01"],
["codecommit", "2015-04-13"],
["codedeploy", "2014-10-06"],
["codepipeline", "2015-07-09"],
["cognito-identity", "2014-06-30"],
["config", "2014-11-12"],
["datapipeline", "2012-10-29"],
["devicefarm", "2015-06-23"],
["directconnect", "2012-10-25"],
["ds", "2015-04-16"],
["dynamodb", "2012-08-10"],
["dynamodbstreams", "2012-08-10"],
["ec2", "2016-11-15"],
["ecr", "2015-09-21"],
["ecs", "2014-11-13"],
["elasticache", "2015-02-02"],
["elasticbeanstalk", "2010-12-01"],
["elastictranscoder", "2012-09-25"],
["elb", "2012-06-01"],
["elbv2", "2015-12-01"],
["emr", "2009-03-31"],
["events", "2015-10-07"],
["firehose", "2015-08-04"],
["iam", "2010-05-08"],
["importexport", "2010-06-01"],
["inspector", "2016-02-16"],
["iot", "2015-05-28"],
["kinesis", "2013-12-02"],
["kms", "2014-11-01"],
["lambda", "2015-03-31"],
["logs", "2014-03-28"],
["machinelearning", "2014-12-12"],
["marketplacecommerceanalytics", "2015-07-01"],
["opsworks", "2013-02-18"],
["redshift", "2012-12-01"],
["rds", "2014-10-31"],
["route53", "2013-04-01"],
["route53domains", "2014-05-15"],
["s3", "2006-03-01"],
["sdb", "2009-04-15"],
["sns", "2010-03-31"],
["sqs", "2012-11-05"],
["ssm", "2014-11-06"],
["storagegateway", "2013-06-30"],
["sts", "2011-06-15"],
["swf", "2012-01-25"],
["waf", "2015-08-24"],
["workspaces", "2015-04-08"]
};
let count: usize = services.into_par_iter().map(|service| generate(service, &out_path)).count();
println!("\nGenerated {:?} services.\n", count);
generate_user_agent_vars(&out_path);
let codegen_dir = Path::new("codegen");
// avoid unnecessary recompiles when used as a crates.io dependency
if codegen_dir.exists() {
println!("cargo:rerun-if-changed=codegen");
}
}
| {
let rust_version = rustc_version::version();
let mut f = File::create(&output_path.join("user_agent_vars.rs"))
.expect("Could not create user agent file");
f.write_all(format!("static RUST_VERSION: &'static str = \"{}\";", rust_version).as_bytes())
.expect("Unable to write user agent");
} | identifier_body |
build.rs | extern crate rustc_version;
extern crate rusoto_codegen;
extern crate rayon;
use std::env;
use std::path::Path;
use std::io::Write;
use std::fs::File;
use rusoto_codegen::{Service, generate};
use rayon::prelude::*;
/// Parses and generates variables used to construct a User-Agent.
///
/// This is used to create a User-Agent header string resembling
/// `rusoto/x.y.z rust/x.y.z <os>`.
fn generate_user_agent_vars(output_path: &Path) {
let rust_version = rustc_version::version();
let mut f = File::create(&output_path.join("user_agent_vars.rs"))
.expect("Could not create user agent file");
f.write_all(format!("static RUST_VERSION: &'static str = \"{}\";", rust_version).as_bytes())
.expect("Unable to write user agent");
}
/*
gamelift/2015-10-01/service-2.json: "protocol":"json"
support/2013-04-15/service-2.json: "protocol":"json"
*/
// expand to use cfg!() so codegen only gets run for services
// in the features list
macro_rules! services {
( $( [$name:expr, $date:expr] ),* ) => {
{
let mut services = Vec::new();
$(
if cfg!(feature = $name) {
services.push(Service::new($name, $date));
}
)*
services
}
}
}
fn main() {
let out_dir = env::var_os("OUT_DIR").expect("OUT_DIR not specified");
let out_path = Path::new(&out_dir).to_owned();
let services = services! {
["acm", "2015-12-08"],
["autoscaling", "2011-01-01"],
["cloudformation", "2010-05-15"],
["cloudfront", "2016-11-25"],
["cloudhsm", "2014-05-30"],
["cloudsearch", "2013-01-01"],
["cloudtrail", "2013-11-01"],
["cloudwatch", "2010-08-01"],
["codecommit", "2015-04-13"],
["codedeploy", "2014-10-06"],
["codepipeline", "2015-07-09"],
["cognito-identity", "2014-06-30"],
["config", "2014-11-12"],
["datapipeline", "2012-10-29"],
["devicefarm", "2015-06-23"],
["directconnect", "2012-10-25"],
["ds", "2015-04-16"],
["dynamodb", "2012-08-10"],
["dynamodbstreams", "2012-08-10"],
["ec2", "2016-11-15"],
["ecr", "2015-09-21"],
["ecs", "2014-11-13"],
["elasticache", "2015-02-02"],
["elasticbeanstalk", "2010-12-01"],
["elastictranscoder", "2012-09-25"],
["elb", "2012-06-01"],
["elbv2", "2015-12-01"],
["emr", "2009-03-31"],
["events", "2015-10-07"],
["firehose", "2015-08-04"],
["iam", "2010-05-08"],
["importexport", "2010-06-01"],
["inspector", "2016-02-16"],
["iot", "2015-05-28"],
["kinesis", "2013-12-02"],
["kms", "2014-11-01"],
["lambda", "2015-03-31"],
["logs", "2014-03-28"],
["machinelearning", "2014-12-12"],
["marketplacecommerceanalytics", "2015-07-01"],
["opsworks", "2013-02-18"],
["redshift", "2012-12-01"],
["rds", "2014-10-31"],
["route53", "2013-04-01"],
["route53domains", "2014-05-15"],
["s3", "2006-03-01"],
["sdb", "2009-04-15"],
["sns", "2010-03-31"],
["sqs", "2012-11-05"],
["ssm", "2014-11-06"],
["storagegateway", "2013-06-30"],
["sts", "2011-06-15"],
["swf", "2012-01-25"],
["waf", "2015-08-24"],
["workspaces", "2015-04-08"]
};
let count: usize = services.into_par_iter().map(|service| generate(service, &out_path)).count();
println!("\nGenerated {:?} services.\n", count);
generate_user_agent_vars(&out_path);
let codegen_dir = Path::new("codegen");
// avoid unnecessary recompiles when used as a crates.io dependency
if codegen_dir.exists() |
}
| {
println!("cargo:rerun-if-changed=codegen");
} | conditional_block |
build.rs | extern crate rustc_version;
extern crate rusoto_codegen;
extern crate rayon;
use std::env;
use std::path::Path;
use std::io::Write;
use std::fs::File;
use rusoto_codegen::{Service, generate};
use rayon::prelude::*;
/// Parses and generates variables used to construct a User-Agent.
///
/// This is used to create a User-Agent header string resembling
/// `rusoto/x.y.z rust/x.y.z <os>`.
fn | (output_path: &Path) {
let rust_version = rustc_version::version();
let mut f = File::create(&output_path.join("user_agent_vars.rs"))
.expect("Could not create user agent file");
f.write_all(format!("static RUST_VERSION: &'static str = \"{}\";", rust_version).as_bytes())
.expect("Unable to write user agent");
}
/*
gamelift/2015-10-01/service-2.json: "protocol":"json"
support/2013-04-15/service-2.json: "protocol":"json"
*/
// expand to use cfg!() so codegen only gets run for services
// in the features list
macro_rules! services {
( $( [$name:expr, $date:expr] ),* ) => {
{
let mut services = Vec::new();
$(
if cfg!(feature = $name) {
services.push(Service::new($name, $date));
}
)*
services
}
}
}
fn main() {
let out_dir = env::var_os("OUT_DIR").expect("OUT_DIR not specified");
let out_path = Path::new(&out_dir).to_owned();
let services = services! {
["acm", "2015-12-08"],
["autoscaling", "2011-01-01"],
["cloudformation", "2010-05-15"],
["cloudfront", "2016-11-25"],
["cloudhsm", "2014-05-30"],
["cloudsearch", "2013-01-01"],
["cloudtrail", "2013-11-01"],
["cloudwatch", "2010-08-01"],
["codecommit", "2015-04-13"],
["codedeploy", "2014-10-06"],
["codepipeline", "2015-07-09"],
["cognito-identity", "2014-06-30"],
["config", "2014-11-12"],
["datapipeline", "2012-10-29"],
["devicefarm", "2015-06-23"],
["directconnect", "2012-10-25"],
["ds", "2015-04-16"],
["dynamodb", "2012-08-10"],
["dynamodbstreams", "2012-08-10"],
["ec2", "2016-11-15"],
["ecr", "2015-09-21"],
["ecs", "2014-11-13"],
["elasticache", "2015-02-02"],
["elasticbeanstalk", "2010-12-01"],
["elastictranscoder", "2012-09-25"],
["elb", "2012-06-01"],
["elbv2", "2015-12-01"],
["emr", "2009-03-31"],
["events", "2015-10-07"],
["firehose", "2015-08-04"],
["iam", "2010-05-08"],
["importexport", "2010-06-01"],
["inspector", "2016-02-16"],
["iot", "2015-05-28"],
["kinesis", "2013-12-02"],
["kms", "2014-11-01"],
["lambda", "2015-03-31"],
["logs", "2014-03-28"],
["machinelearning", "2014-12-12"],
["marketplacecommerceanalytics", "2015-07-01"],
["opsworks", "2013-02-18"],
["redshift", "2012-12-01"],
["rds", "2014-10-31"],
["route53", "2013-04-01"],
["route53domains", "2014-05-15"],
["s3", "2006-03-01"],
["sdb", "2009-04-15"],
["sns", "2010-03-31"],
["sqs", "2012-11-05"],
["ssm", "2014-11-06"],
["storagegateway", "2013-06-30"],
["sts", "2011-06-15"],
["swf", "2012-01-25"],
["waf", "2015-08-24"],
["workspaces", "2015-04-08"]
};
let count: usize = services.into_par_iter().map(|service| generate(service, &out_path)).count();
println!("\nGenerated {:?} services.\n", count);
generate_user_agent_vars(&out_path);
let codegen_dir = Path::new("codegen");
// avoid unnecessary recompiles when used as a crates.io dependency
if codegen_dir.exists() {
println!("cargo:rerun-if-changed=codegen");
}
}
| generate_user_agent_vars | identifier_name |
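For context on how the file generated by this build script is usually consumed: a minimal sketch, assuming the standard `include!`-from-`OUT_DIR` pattern; the `user_agent` helper and the version literal are illustrative assumptions, not code from this dataset.

```rust
// Hypothetical consumer side of the build script above: pull the generated
// user_agent_vars.rs out of OUT_DIR so RUST_VERSION is visible in this crate.
include!(concat!(env!("OUT_DIR"), "/user_agent_vars.rs"));

// Assumed helper: assembles the `rusoto/x.y.z rust/x.y.z <os>` string that the
// build script's doc comment describes.
fn user_agent(rusoto_version: &str) -> String {
    format!("rusoto/{} rust/{} {}", rusoto_version, RUST_VERSION, std::env::consts::OS)
}

fn main() {
    println!("{}", user_agent("0.24.0")); // the version literal is a placeholder
}
```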
p041.rs | //! [Problem 41](https://projecteuler.net/problem=41) solver.
#![warn(
bad_style,
unused,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results
)]
use integer::Integer;
use iter::Permutations;
use prime::PrimeSet;
// 1 + 2 + ... + 9 = 45 (divisible by 9 => any 9-pandigital number is divisible by 9)
// 1 + 2 + ... + 8 = 36 (divisible by 9 => any 8-pandigital number is divisible by 9)
// A 7-pandigital number may therefore be the largest pandigital prime.
fn compute() -> u64 {
let radix = 10;
let ps = PrimeSet::new();
for (perm, _) in Permutations::new(&[7, 6, 5, 4, 3, 2, 1], 7) {
let n = Integer::from_digits(perm.iter().rev().copied(), radix);
if ps.contains(n) {
return n;
}
}
unreachable!()
}
fn | () -> String {
compute().to_string()
}
common::problem!("7652413", solve);
| solve | identifier_name |
p041.rs | //! [Problem 41](https://projecteuler.net/problem=41) solver.
#![warn(
bad_style,
unused,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results
)]
use integer::Integer;
use iter::Permutations;
use prime::PrimeSet;
// 1 + 2 + ... + 9 = 45 (divisible by 9 => any 9-pandigital number is divisible by 9)
// 1 + 2 + ... + 8 = 36 (divisible by 9 => any 8-pandigital number is divisible by 9)
// A 7-pandigital number may therefore be the largest pandigital prime.
fn compute() -> u64 {
let radix = 10;
let ps = PrimeSet::new();
for (perm, _) in Permutations::new(&[7, 6, 5, 4, 3, 2, 1], 7) {
let n = Integer::from_digits(perm.iter().rev().copied(), radix);
if ps.contains(n) |
}
unreachable!()
}
fn solve() -> String {
compute().to_string()
}
common::problem!("7652413", solve);
| {
return n;
} | conditional_block |
p041.rs | //! [Problem 41](https://projecteuler.net/problem=41) solver.
#![warn(
bad_style,
unused,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results
)]
use integer::Integer;
use iter::Permutations;
use prime::PrimeSet;
// 1 + 2 + ... + 9 = 45 (divisible by 9 => any 9-pandigital number is divisible by 9)
// 1 + 2 + ... + 8 = 36 (divisible by 9 => any 8-pandigital number is divisible by 9)
// A 7-pandigital number may therefore be the largest pandigital prime.
fn compute() -> u64 |
fn solve() -> String {
compute().to_string()
}
common::problem!("7652413", solve);
| {
let radix = 10;
let ps = PrimeSet::new();
for (perm, _) in Permutations::new(&[7, 6, 5, 4, 3, 2, 1], 7) {
let n = Integer::from_digits(perm.iter().rev().copied(), radix);
if ps.contains(n) {
return n;
}
}
unreachable!()
} | identifier_body |
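The digit-sum argument in the solver's comments can be checked directly; a standalone sketch (not part of the solution above):

```rust
// An n-pandigital number is divisible by 9 exactly when the sum of the digits
// 1..=n is divisible by 9, since a number is congruent to its digit sum mod 9.
fn digit_sum(n: u64) -> u64 {
    (1..=n).sum()
}

fn main() {
    assert_eq!(digit_sum(9) % 9, 0); // 45: every 9-pandigital is divisible by 9
    assert_eq!(digit_sum(8) % 9, 0); // 36: every 8-pandigital is divisible by 9
    assert_ne!(digit_sum(7) % 9, 0); // 28: 7-pandigitals can therefore be prime
}
```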
line.rs | /**
* Flow - Realtime log analyzer
* Copyright (C) 2016 Daniel Mircea
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::cmp::max;
use std::collections::VecDeque;
use std::iter::{Rev, DoubleEndedIterator};
use unicode_width::UnicodeWidthStr;
use core::filter::{Filter, Parser as FilterParser, Constraint, ParserResult as FilterParserResult};
use utils::ansi_decoder::{ComponentCollection, AnsiStr};
#[derive(Clone)]
pub struct Line {
pub content_without_ansi: String,
pub components: Option<ComponentCollection>,
pub width: usize,
}
impl Line {
pub fn new(content: String) -> Line {
let has_ansi = content.has_ansi_escape_sequence();
let (content_without_ansi, components) = if has_ansi {
(content.strip_ansi(), Some(content.to_components()))
} else {
(content, None)
};
Line {
width: content_without_ansi.width(),
content_without_ansi: content_without_ansi,
components: components,
}
}
pub fn guess_height(&self, container_width: usize) -> usize {
max(1,
(self.width as f32 / container_width as f32).ceil() as usize)
}
pub fn matches_for(&self, text: &str) -> Vec<(usize, &str)> {
self.content_without_ansi.match_indices(text).collect()
}
pub fn contains(&self, text: &str) -> bool {
self.content_without_ansi.contains(text)
}
}
pub struct LineCollection {
pub entries: VecDeque<Line>,
capacity: usize,
}
impl LineCollection {
pub fn new(capacity: usize) -> LineCollection {
LineCollection {
entries: VecDeque::new(),
capacity: capacity,
}
}
fn clear_excess(&mut self) {
while self.entries.len() > self.capacity {
self.entries.pop_front();
}
}
pub fn len(&self) -> usize {
self.entries.len()
}
fn add(&mut self, item: String) {
self.entries.push_back(Line::new(item));
}
} |
impl Extend<String> for LineCollection {
fn extend<T: IntoIterator<Item = String>>(&mut self, iter: T) {
for item in iter {
self.add(item);
}
self.clear_excess();
}
}
pub struct ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
iterator: I,
parser: FilterParser,
pending: Vec<&'a Line>,
}
impl<'a, I> ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
fn handle_empty(&mut self) -> Option<I::Item> {
self.iterator.next()
}
fn handle_content(&mut self) -> Option<I::Item> {
let matcher = self.parser.filter.content.as_ref().unwrap();
(&mut self.iterator).filter(|line| matcher.is_match(&line.content_without_ansi)).next()
}
fn handle_boundaries(&mut self) -> Option<I::Item> {
if self.pending.is_empty() {
let mut match_found = false;
for line in &mut self.iterator {
match self.parser.matches(&line.content_without_ansi) {
FilterParserResult::Match => self.pending.push(line),
FilterParserResult::LastMatch(append) => {
match_found = true;
if append {
self.pending.push(line);
}
break;
}
FilterParserResult::Invalid(append) => {
self.pending.clear();
if append {
self.pending.push(line);
}
}
FilterParserResult::NoMatch => {}
}
}
if !(match_found || self.parser.assume_found_matches()) {
return None;
}
self.pending.reverse();
}
self.pending.pop()
}
}
pub trait Parser<'a>: Iterator<Item = &'a Line> {
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized;
}
impl<'a, I> Parser<'a> for I
where I: Iterator<Item = &'a Line>
{
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized
{
ParserState {
iterator: self.rev(),
pending: vec![],
parser: FilterParser::new(filter),
}
}
}
impl<'a, I> Iterator for ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.parser.constraints.is_empty() {
self.handle_empty()
} else if self.parser.constraints == vec![Constraint::Content] {
self.handle_content()
} else {
self.handle_boundaries()
}
}
} | random_line_split |
|
line.rs | /**
* Flow - Realtime log analyzer
* Copyright (C) 2016 Daniel Mircea
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::cmp::max;
use std::collections::VecDeque;
use std::iter::{Rev, DoubleEndedIterator};
use unicode_width::UnicodeWidthStr;
use core::filter::{Filter, Parser as FilterParser, Constraint, ParserResult as FilterParserResult};
use utils::ansi_decoder::{ComponentCollection, AnsiStr};
#[derive(Clone)]
pub struct | {
pub content_without_ansi: String,
pub components: Option<ComponentCollection>,
pub width: usize,
}
impl Line {
pub fn new(content: String) -> Line {
let has_ansi = content.has_ansi_escape_sequence();
let (content_without_ansi, components) = if has_ansi {
(content.strip_ansi(), Some(content.to_components()))
} else {
(content, None)
};
Line {
width: content_without_ansi.width(),
content_without_ansi: content_without_ansi,
components: components,
}
}
pub fn guess_height(&self, container_width: usize) -> usize {
max(1,
(self.width as f32 / container_width as f32).ceil() as usize)
}
pub fn matches_for(&self, text: &str) -> Vec<(usize, &str)> {
self.content_without_ansi.match_indices(text).collect()
}
pub fn contains(&self, text: &str) -> bool {
self.content_without_ansi.contains(text)
}
}
pub struct LineCollection {
pub entries: VecDeque<Line>,
capacity: usize,
}
impl LineCollection {
pub fn new(capacity: usize) -> LineCollection {
LineCollection {
entries: VecDeque::new(),
capacity: capacity,
}
}
fn clear_excess(&mut self) {
while self.entries.len() > self.capacity {
self.entries.pop_front();
}
}
pub fn len(&self) -> usize {
self.entries.len()
}
fn add(&mut self, item: String) {
self.entries.push_back(Line::new(item));
}
}
impl Extend<String> for LineCollection {
fn extend<T: IntoIterator<Item = String>>(&mut self, iter: T) {
for item in iter {
self.add(item);
}
self.clear_excess();
}
}
pub struct ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
iterator: I,
parser: FilterParser,
pending: Vec<&'a Line>,
}
impl<'a, I> ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
fn handle_empty(&mut self) -> Option<I::Item> {
self.iterator.next()
}
fn handle_content(&mut self) -> Option<I::Item> {
let matcher = self.parser.filter.content.as_ref().unwrap();
(&mut self.iterator).filter(|line| matcher.is_match(&line.content_without_ansi)).next()
}
fn handle_boundaries(&mut self) -> Option<I::Item> {
if self.pending.is_empty() {
let mut match_found = false;
for line in &mut self.iterator {
match self.parser.matches(&line.content_without_ansi) {
FilterParserResult::Match => self.pending.push(line),
FilterParserResult::LastMatch(append) => {
match_found = true;
if append {
self.pending.push(line);
}
break;
}
FilterParserResult::Invalid(append) => {
self.pending.clear();
if append {
self.pending.push(line);
}
}
FilterParserResult::NoMatch => {}
}
}
if !(match_found || self.parser.assume_found_matches()) {
return None;
}
self.pending.reverse();
}
self.pending.pop()
}
}
pub trait Parser<'a>: Iterator<Item = &'a Line> {
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized;
}
impl<'a, I> Parser<'a> for I
where I: Iterator<Item = &'a Line>
{
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized
{
ParserState {
iterator: self.rev(),
pending: vec![],
parser: FilterParser::new(filter),
}
}
}
impl<'a, I> Iterator for ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.parser.constraints.is_empty() {
self.handle_empty()
} else if self.parser.constraints == vec![Constraint::Content] {
self.handle_content()
} else {
self.handle_boundaries()
}
}
}
| Line | identifier_name |
line.rs | /**
* Flow - Realtime log analyzer
* Copyright (C) 2016 Daniel Mircea
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::cmp::max;
use std::collections::VecDeque;
use std::iter::{Rev, DoubleEndedIterator};
use unicode_width::UnicodeWidthStr;
use core::filter::{Filter, Parser as FilterParser, Constraint, ParserResult as FilterParserResult};
use utils::ansi_decoder::{ComponentCollection, AnsiStr};
#[derive(Clone)]
pub struct Line {
pub content_without_ansi: String,
pub components: Option<ComponentCollection>,
pub width: usize,
}
impl Line {
pub fn new(content: String) -> Line {
let has_ansi = content.has_ansi_escape_sequence();
let (content_without_ansi, components) = if has_ansi {
(content.strip_ansi(), Some(content.to_components()))
} else {
(content, None)
};
Line {
width: content_without_ansi.width(),
content_without_ansi: content_without_ansi,
components: components,
}
}
pub fn guess_height(&self, container_width: usize) -> usize {
max(1,
(self.width as f32 / container_width as f32).ceil() as usize)
}
pub fn matches_for(&self, text: &str) -> Vec<(usize, &str)> {
self.content_without_ansi.match_indices(text).collect()
}
pub fn contains(&self, text: &str) -> bool {
self.content_without_ansi.contains(text)
}
}
pub struct LineCollection {
pub entries: VecDeque<Line>,
capacity: usize,
}
impl LineCollection {
pub fn new(capacity: usize) -> LineCollection |
fn clear_excess(&mut self) {
while self.entries.len() > self.capacity {
self.entries.pop_front();
}
}
pub fn len(&self) -> usize {
self.entries.len()
}
fn add(&mut self, item: String) {
self.entries.push_back(Line::new(item));
}
}
impl Extend<String> for LineCollection {
fn extend<T: IntoIterator<Item = String>>(&mut self, iter: T) {
for item in iter {
self.add(item);
}
self.clear_excess();
}
}
pub struct ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
iterator: I,
parser: FilterParser,
pending: Vec<&'a Line>,
}
impl<'a, I> ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
fn handle_empty(&mut self) -> Option<I::Item> {
self.iterator.next()
}
fn handle_content(&mut self) -> Option<I::Item> {
let matcher = self.parser.filter.content.as_ref().unwrap();
(&mut self.iterator).filter(|line| matcher.is_match(&line.content_without_ansi)).next()
}
fn handle_boundaries(&mut self) -> Option<I::Item> {
if self.pending.is_empty() {
let mut match_found = false;
for line in &mut self.iterator {
match self.parser.matches(&line.content_without_ansi) {
FilterParserResult::Match => self.pending.push(line),
FilterParserResult::LastMatch(append) => {
match_found = true;
if append {
self.pending.push(line);
}
break;
}
FilterParserResult::Invalid(append) => {
self.pending.clear();
if append {
self.pending.push(line);
}
}
FilterParserResult::NoMatch => {}
}
}
if !(match_found || self.parser.assume_found_matches()) {
return None;
}
self.pending.reverse();
}
self.pending.pop()
}
}
pub trait Parser<'a>: Iterator<Item = &'a Line> {
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized;
}
impl<'a, I> Parser<'a> for I
where I: Iterator<Item = &'a Line>
{
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized
{
ParserState {
iterator: self.rev(),
pending: vec![],
parser: FilterParser::new(filter),
}
}
}
impl<'a, I> Iterator for ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.parser.constraints.is_empty() {
self.handle_empty()
} else if self.parser.constraints == vec![Constraint::Content] {
self.handle_content()
} else {
self.handle_boundaries()
}
}
}
| {
LineCollection {
entries: VecDeque::new(),
capacity: capacity,
}
} | identifier_body |
line.rs | /**
* Flow - Realtime log analyzer
* Copyright (C) 2016 Daniel Mircea
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::cmp::max;
use std::collections::VecDeque;
use std::iter::{Rev, DoubleEndedIterator};
use unicode_width::UnicodeWidthStr;
use core::filter::{Filter, Parser as FilterParser, Constraint, ParserResult as FilterParserResult};
use utils::ansi_decoder::{ComponentCollection, AnsiStr};
#[derive(Clone)]
pub struct Line {
pub content_without_ansi: String,
pub components: Option<ComponentCollection>,
pub width: usize,
}
impl Line {
pub fn new(content: String) -> Line {
let has_ansi = content.has_ansi_escape_sequence();
let (content_without_ansi, components) = if has_ansi {
(content.strip_ansi(), Some(content.to_components()))
} else {
(content, None)
};
Line {
width: content_without_ansi.width(),
content_without_ansi: content_without_ansi,
components: components,
}
}
pub fn guess_height(&self, container_width: usize) -> usize {
max(1,
(self.width as f32 / container_width as f32).ceil() as usize)
}
pub fn matches_for(&self, text: &str) -> Vec<(usize, &str)> {
self.content_without_ansi.match_indices(text).collect()
}
pub fn contains(&self, text: &str) -> bool {
self.content_without_ansi.contains(text)
}
}
pub struct LineCollection {
pub entries: VecDeque<Line>,
capacity: usize,
}
impl LineCollection {
pub fn new(capacity: usize) -> LineCollection {
LineCollection {
entries: VecDeque::new(),
capacity: capacity,
}
}
fn clear_excess(&mut self) {
while self.entries.len() > self.capacity {
self.entries.pop_front();
}
}
pub fn len(&self) -> usize {
self.entries.len()
}
fn add(&mut self, item: String) {
self.entries.push_back(Line::new(item));
}
}
impl Extend<String> for LineCollection {
fn extend<T: IntoIterator<Item = String>>(&mut self, iter: T) {
for item in iter {
self.add(item);
}
self.clear_excess();
}
}
pub struct ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
iterator: I,
parser: FilterParser,
pending: Vec<&'a Line>,
}
impl<'a, I> ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
fn handle_empty(&mut self) -> Option<I::Item> {
self.iterator.next()
}
fn handle_content(&mut self) -> Option<I::Item> {
let matcher = self.parser.filter.content.as_ref().unwrap();
(&mut self.iterator).filter(|line| matcher.is_match(&line.content_without_ansi)).next()
}
fn handle_boundaries(&mut self) -> Option<I::Item> {
if self.pending.is_empty() {
let mut match_found = false;
for line in &mut self.iterator {
match self.parser.matches(&line.content_without_ansi) {
FilterParserResult::Match => self.pending.push(line),
FilterParserResult::LastMatch(append) => |
FilterParserResult::Invalid(append) => {
self.pending.clear();
if append {
self.pending.push(line);
}
}
FilterParserResult::NoMatch => {}
}
}
if !(match_found || self.parser.assume_found_matches()) {
return None;
}
self.pending.reverse();
}
self.pending.pop()
}
}
pub trait Parser<'a>: Iterator<Item = &'a Line> {
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized;
}
impl<'a, I> Parser<'a> for I
where I: Iterator<Item = &'a Line>
{
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized
{
ParserState {
iterator: self.rev(),
pending: vec![],
parser: FilterParser::new(filter),
}
}
}
impl<'a, I> Iterator for ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.parser.constraints.is_empty() {
self.handle_empty()
} else if self.parser.constraints == vec![Constraint::Content] {
self.handle_content()
} else {
self.handle_boundaries()
}
}
}
| {
match_found = true;
if append {
self.pending.push(line);
}
break;
} | conditional_block |
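A usage note for the `LineCollection` type above, as a minimal sketch (assumes the definitions from line.rs are in scope): `extend` pushes the new entries first and then trims from the front, so the oldest lines are the ones dropped.

```rust
// Sketch only: capacity is enforced by clear_excess() after each extend().
fn demo() {
    let mut lines = LineCollection::new(2);
    lines.extend(vec!["first".to_string(), "second".to_string(), "third".to_string()]);
    assert_eq!(lines.len(), 2); // "first" was popped from the front
    assert_eq!(lines.entries[0].content_without_ansi, "second");
}
```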
deriving-cmp-generic-struct.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(Eq, TotalEq, Ord, TotalOrd)]
struct S<T> {
x: T,
y: T
}
pub fn main() |
// TotalEq
assert_eq!(s1.equals(s2), eq);
// Ord
assert_eq!(*s1 < *s2, lt);
assert_eq!(*s1 > *s2, gt);
assert_eq!(*s1 <= *s2, le);
assert_eq!(*s1 >= *s2, ge);
// TotalOrd
assert_eq!(s1.cmp(s2), ord);
}
}
}
| {
let s1 = S {x: 1, y: 1};
let s2 = S {x: 1, y: 2};
// in order for both Ord and TotalOrd
let ss = [s1, s2];
for (i, s1) in ss.iter().enumerate() {
for (j, s2) in ss.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// Eq
assert_eq!(*s1 == *s2, eq);
assert_eq!(*s1 != *s2, !eq); | identifier_body
deriving-cmp-generic-struct.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| y: T
}
pub fn main() {
let s1 = S {x: 1, y: 1};
let s2 = S {x: 1, y: 2};
// in order for both Ord and TotalOrd
let ss = [s1, s2];
for (i, s1) in ss.iter().enumerate() {
for (j, s2) in ss.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// Eq
assert_eq!(*s1 == *s2, eq);
assert_eq!(*s1 != *s2, !eq);
// TotalEq
assert_eq!(s1.equals(s2), eq);
// Ord
assert_eq!(*s1 < *s2, lt);
assert_eq!(*s1 > *s2, gt);
assert_eq!(*s1 <= *s2, le);
assert_eq!(*s1 >= *s2, ge);
// TotalOrd
assert_eq!(s1.cmp(s2), ord);
}
}
} | #[deriving(Eq, TotalEq, Ord, TotalOrd)]
struct S<T> {
x: T, | random_line_split |
deriving-cmp-generic-struct.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(Eq, TotalEq, Ord, TotalOrd)]
struct S<T> {
x: T,
y: T
}
pub fn | () {
let s1 = S {x: 1, y: 1};
let s2 = S {x: 1, y: 2};
// in order for both Ord and TotalOrd
let ss = [s1, s2];
for (i, s1) in ss.iter().enumerate() {
for (j, s2) in ss.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// Eq
assert_eq!(*s1 == *s2, eq);
assert_eq!(*s1 != *s2, !eq);
// TotalEq
assert_eq!(s1.equals(s2), eq);
// Ord
assert_eq!(*s1 < *s2, lt);
assert_eq!(*s1 > *s2, gt);
assert_eq!(*s1 <= *s2, le);
assert_eq!(*s1 >= *s2, ge);
// TotalOrd
assert_eq!(s1.cmp(s2), ord);
}
}
}
| main | identifier_name |
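The derived comparisons exercised above are lexicographic in field declaration order. A small sketch using the modern derive names (`TotalEq`/`TotalOrd` are the pre-1.0 spellings of today's `Eq`/`Ord`):

```rust
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct S<T> {
    x: T,
    y: T,
}

fn main() {
    // x is compared first; y only breaks ties, matching the field order above.
    assert!(S { x: 1, y: 9 } < S { x: 2, y: 0 });
    assert!(S { x: 1, y: 1 } < S { x: 1, y: 2 });
}
```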
mod.rs | //! Thrift generated Jaeger client
//!
//! Definitions: <https://github.com/uber/jaeger-idl/blob/master/thrift/>
use std::time::{Duration, SystemTime};
use opentelemetry::trace::Event;
use opentelemetry::{Key, KeyValue, Value};
pub(crate) mod agent;
pub(crate) mod jaeger;
pub(crate) mod zipkincore;
impl From<super::Process> for jaeger::Process {
fn | (process: super::Process) -> jaeger::Process {
jaeger::Process::new(
process.service_name,
Some(process.tags.into_iter().map(Into::into).collect()),
)
}
}
impl From<Event> for jaeger::Log {
fn from(event: crate::exporter::Event) -> jaeger::Log {
let timestamp = event
.timestamp
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_micros() as i64;
let mut event_set_via_attribute = false;
let mut fields = event
.attributes
.into_iter()
.map(|attr| {
if attr.key.as_str() == "event" {
event_set_via_attribute = true;
};
attr.into()
})
.collect::<Vec<_>>();
if !event_set_via_attribute {
fields.push(Key::new("event").string(event.name).into());
}
if event.dropped_attributes_count != 0 {
fields.push(
Key::new("otel.event.dropped_attributes_count")
.i64(i64::from(event.dropped_attributes_count))
.into(),
);
}
jaeger::Log::new(timestamp, fields)
}
}
#[rustfmt::skip]
impl From<KeyValue> for jaeger::Tag {
fn from(kv: KeyValue) -> jaeger::Tag {
let KeyValue { key, value } = kv;
match value {
Value::String(s) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(s.into()), None, None, None, None),
Value::F64(f) => jaeger::Tag::new(key.into(), jaeger::TagType::Double, None, Some(f.into()), None, None, None),
Value::Bool(b) => jaeger::Tag::new(key.into(), jaeger::TagType::Bool, None, None, Some(b), None, None),
Value::I64(i) => jaeger::Tag::new(key.into(), jaeger::TagType::Long, None, None, None, Some(i), None),
// TODO: better Array handling, jaeger thrift doesn't support arrays
v @ Value::Array(_) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(v.to_string()), None, None, None, None),
}
}
}
| from | identifier_name |
mod.rs | //! Thrift generated Jaeger client
//!
//! Definitions: <https://github.com/uber/jaeger-idl/blob/master/thrift/>
use std::time::{Duration, SystemTime};
use opentelemetry::trace::Event;
use opentelemetry::{Key, KeyValue, Value};
|
impl From<super::Process> for jaeger::Process {
fn from(process: super::Process) -> jaeger::Process {
jaeger::Process::new(
process.service_name,
Some(process.tags.into_iter().map(Into::into).collect()),
)
}
}
impl From<Event> for jaeger::Log {
fn from(event: crate::exporter::Event) -> jaeger::Log {
let timestamp = event
.timestamp
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_micros() as i64;
let mut event_set_via_attribute = false;
let mut fields = event
.attributes
.into_iter()
.map(|attr| {
if attr.key.as_str() == "event" {
event_set_via_attribute = true;
};
attr.into()
})
.collect::<Vec<_>>();
if !event_set_via_attribute {
fields.push(Key::new("event").string(event.name).into());
}
if event.dropped_attributes_count != 0 {
fields.push(
Key::new("otel.event.dropped_attributes_count")
.i64(i64::from(event.dropped_attributes_count))
.into(),
);
}
jaeger::Log::new(timestamp, fields)
}
}
#[rustfmt::skip]
impl From<KeyValue> for jaeger::Tag {
fn from(kv: KeyValue) -> jaeger::Tag {
let KeyValue { key, value } = kv;
match value {
Value::String(s) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(s.into()), None, None, None, None),
Value::F64(f) => jaeger::Tag::new(key.into(), jaeger::TagType::Double, None, Some(f.into()), None, None, None),
Value::Bool(b) => jaeger::Tag::new(key.into(), jaeger::TagType::Bool, None, None, Some(b), None, None),
Value::I64(i) => jaeger::Tag::new(key.into(), jaeger::TagType::Long, None, None, None, Some(i), None),
// TODO: better Array handling, jaeger thrift doesn't support arrays
v @ Value::Array(_) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(v.to_string()), None, None, None, None),
}
}
} | pub(crate) mod agent;
pub(crate) mod jaeger;
pub(crate) mod zipkincore; | random_line_split |
mod.rs | //! Thrift generated Jaeger client
//!
//! Definitions: <https://github.com/uber/jaeger-idl/blob/master/thrift/>
use std::time::{Duration, SystemTime};
use opentelemetry::trace::Event;
use opentelemetry::{Key, KeyValue, Value};
pub(crate) mod agent;
pub(crate) mod jaeger;
pub(crate) mod zipkincore;
impl From<super::Process> for jaeger::Process {
fn from(process: super::Process) -> jaeger::Process {
jaeger::Process::new(
process.service_name,
Some(process.tags.into_iter().map(Into::into).collect()),
)
}
}
impl From<Event> for jaeger::Log {
fn from(event: crate::exporter::Event) -> jaeger::Log | }
if event.dropped_attributes_count != 0 {
fields.push(
Key::new("otel.event.dropped_attributes_count")
.i64(i64::from(event.dropped_attributes_count))
.into(),
);
}
jaeger::Log::new(timestamp, fields)
}
}
#[rustfmt::skip]
impl From<KeyValue> for jaeger::Tag {
fn from(kv: KeyValue) -> jaeger::Tag {
let KeyValue { key, value } = kv;
match value {
Value::String(s) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(s.into()), None, None, None, None),
Value::F64(f) => jaeger::Tag::new(key.into(), jaeger::TagType::Double, None, Some(f.into()), None, None, None),
Value::Bool(b) => jaeger::Tag::new(key.into(), jaeger::TagType::Bool, None, None, Some(b), None, None),
Value::I64(i) => jaeger::Tag::new(key.into(), jaeger::TagType::Long, None, None, None, Some(i), None),
// TODO: better Array handling, jaeger thrift doesn't support arrays
v @ Value::Array(_) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(v.to_string()), None, None, None, None),
}
}
}
| {
let timestamp = event
.timestamp
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_micros() as i64;
let mut event_set_via_attribute = false;
let mut fields = event
.attributes
.into_iter()
.map(|attr| {
if attr.key.as_str() == "event" {
event_set_via_attribute = true;
};
attr.into()
})
.collect::<Vec<_>>();
if !event_set_via_attribute {
fields.push(Key::new("event").string(event.name).into()); | identifier_body |
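To illustrate the `From<KeyValue>` conversion above: each OpenTelemetry value variant fills exactly one typed slot of the thrift `Tag` and leaves the rest `None`. A hedged sketch; the `v_long` field name follows the usual thrift codegen convention and is an assumption, not something shown in this row.

```rust
// Sketch: an i64 attribute takes the Value::I64 arm of the match above, so the
// resulting Tag is TagType::Long with only its long slot populated.
fn demo() {
    let kv = opentelemetry::KeyValue::new("retry.count", 3_i64);
    let tag: jaeger::Tag = kv.into();
    assert_eq!(tag.v_long, Some(3)); // field name assumed from thrift codegen
}
```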
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 {
return;
}
// we count occurrences of values
let size = (max - min + 1) as usize;
let mut count = vec![0; size];
for e in array.iter() {
count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn | () {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
}
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
}
| rosetta_vector | identifier_name |
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 {
return;
}
// we count occurrences of values
let size = (max - min + 1) as usize; | count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn rosetta_vector() {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
}
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
} | let mut count = vec![0; size];
for e in array.iter() { | random_line_split |
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 {
return;
}
// we count occurrences of values
let size = (max - min + 1) as usize;
let mut count = vec![0; size];
for e in array.iter() {
count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn rosetta_vector() {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() |
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
}
| {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
} | identifier_body |
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 |
// we count occurrences of values
let size = (max - min + 1) as usize;
let mut count = vec![0; size];
for e in array.iter() {
count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn rosetta_vector() {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
}
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
}
| {
return;
} | conditional_block |
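A quick sanity check of `counting_sort` as defined above (assumes the function is in scope). The caller must supply a `[min, max]` range that bounds every element; out-of-range values make the `(*e - min) as usize` index panic, which is exactly what the `bad_min` test relies on.

```rust
fn main() {
    let mut v = [3i32, 1, 2, 1, 0];
    counting_sort(&mut v, 0, 3); // min/max must bound every element
    assert_eq!(v, [0, 1, 1, 2, 3]);
}
```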
struct-style-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-command:set print union on
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print case1
// gdb-check:$1 = {{Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {Case1, a = 0, b = 2088533116, c = 2088533116}, {Case1, a = 0, b = 8970181431921507452}}
// gdb-command:print case2
// gdb-check:$2 = {{Case2, a = 0, b = 4369, c = 4369, d = 4369, e = 4369}, {Case2, a = 0, b = 286331153, c = 286331153}, {Case2, a = 0, b = 1229782938247303441}}
// gdb-command:print case3
// gdb-check:$3 = {{Case3, a = 0, b = 22873, c = 22873, d = 22873, e = 22873}, {Case3, a = 0, b = 1499027801, c = 1499027801}, {Case3, a = 0, b = 6438275382588823897}}
// gdb-command:print univariant
// gdb-check:$4 = {a = -1}
#![allow(unused_variable)]
#![feature(struct_variant)]
// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
// the size of the discriminant value is machine dependent, this has to be taken into account when
// datatype layout should be predictable as in this case.
enum | {
Case1 { a: u64, b: u16, c: u16, d: u16, e: u16},
Case2 { a: u64, b: u32, c: u32},
Case3 { a: u64, b: u64 }
}
enum Univariant {
TheOnlyCase { a: i64 }
}
fn main() {
// In order to avoid endianness trouble all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2 = Case2 { a: 0, b: 286331153, c: 286331153 };
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3 = Case3 { a: 0, b: 6438275382588823897 };
let univariant = TheOnlyCase { a: -1 };
zzz();
}
fn zzz() {()}
| Regular | identifier_name |
struct-style-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-command:set print union on
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print case1
// gdb-check:$1 = {{Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {Case1, a = 0, b = 2088533116, c = 2088533116}, {Case1, a = 0, b = 8970181431921507452}}
// gdb-command:print case2
// gdb-check:$2 = {{Case2, a = 0, b = 4369, c = 4369, d = 4369, e = 4369}, {Case2, a = 0, b = 286331153, c = 286331153}, {Case2, a = 0, b = 1229782938247303441}}
// gdb-command:print case3
// gdb-check:$3 = {{Case3, a = 0, b = 22873, c = 22873, d = 22873, e = 22873}, {Case3, a = 0, b = 1499027801, c = 1499027801}, {Case3, a = 0, b = 6438275382588823897}}
// gdb-command:print univariant
// gdb-check:$4 = {a = -1} | // the size of the discriminant value is machine dependent, this has to be taken into account when
// datatype layout should be predictable as in this case.
enum Regular {
Case1 { a: u64, b: u16, c: u16, d: u16, e: u16},
Case2 { a: u64, b: u32, c: u32},
Case3 { a: u64, b: u64 }
}
enum Univariant {
TheOnlyCase { a: i64 }
}
fn main() {
// In order to avoid endianness trouble all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2 = Case2 { a: 0, b: 286331153, c: 286331153 };
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3 = Case3 { a: 0, b: 6438275382588823897 };
let univariant = TheOnlyCase { a: -1 };
zzz();
}
fn zzz() {()} |
#![allow(unused_variable)]
#![feature(struct_variant)]
// The first element is to ensure proper alignment, irrespective of the machine's word size. Since | random_line_split
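The repeated-byte constants in the comments above are easy to verify independently: each 64-bit value is a single byte times 0x0101010101010101.

```rust
fn main() {
    const ONES: u64 = 0x0101_0101_0101_0101;
    assert_eq!(0x7c * ONES, 8970181431921507452); // 0b01111100 = 124 = 0x7c
    assert_eq!(0x11 * ONES, 1229782938247303441); // 0b00010001 = 17  = 0x11
    assert_eq!(0x59 * ONES, 6438275382588823897); // 0b01011001 = 89  = 0x59
}
```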
parameters.rs | use http_types::Method;
use std::borrow::Cow;
#[derive(Clone, Debug)]
pub enum Parameters {
Query(String),
Body(String),
}
use std::{
iter::Map,
slice::Iter,
};
// This newtype is currently only used internally here, but we might want to move it elsewhere where
// it could be more useful because of genericity. We could also aim at reducing the amount of
// conversions in requests by having a type that only maps parameters once unless changed.
type ParamsMapper<'a, 'p, S, T> = Map<Iter<'a, (Cow<'p, str>, S)>, fn(&(Cow<'p, str>, S)) -> T>;
impl Parameters {
pub fn new<S: AsRef<str>>(method: &Method, params: &[(Cow<str>, S)]) -> Self {
let params_s = Self::params_to_query(params);
if Self::method_requires_body(method) {
Parameters::Body(params_s)
} else {
Parameters::Query(params_s)
}
}
#[inline]
fn | (method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE => false,
_ => true,
}
}
pub fn path_and_query<'p>(&self, path: &'p str) -> Cow<'p, str> {
self.query().map_or_else(|| Cow::Borrowed(path),
|q| {
let mut url = path.to_string();
url.push('?');
url.push_str(q);
Cow::Owned(url)
})
}
pub fn uri_and_body<'p>(&self, path: &'p str) -> (Cow<'p, str>, Option<&str>) {
(self.path_and_query(path), self.body())
}
pub fn query(&self) -> Option<&str> {
match self {
Parameters::Query(query) => Some(query.as_str()),
_ => None,
}
}
pub fn body(&self) -> Option<&str> {
match self {
Parameters::Body(body) => Some(body.as_str()),
_ => None,
}
}
pub fn into_inner(self) -> String {
match self {
Parameters::Query(s) => s,
Parameters::Body(s) => s,
}
}
pub fn as_mut_string(&mut self) -> &mut String {
match self {
Parameters::Query(ref mut query) => query,
Parameters::Body(ref mut body) => body,
}
}
pub fn query_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Query(ref mut query) => Some(query),
_ => None,
}
}
pub fn body_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Body(ref mut body) => Some(body),
_ => None,
}
}
pub fn push<S: AsRef<str>>(&mut self, extra_params: &[(Cow<str>, S)]) {
let q = Self::params_to_query(extra_params);
let s = self.as_mut_string();
if !s.is_empty() {
s.push('&');
}
s.push_str(q.as_str());
}
fn params_to_string_collection<'p, 'a: 'p, S: AsRef<str>>(params: &'a [(Cow<str>, S)])
-> ParamsMapper<'a, 'p, S, String> {
params.iter()
.map(|(k, v)| [k.as_ref().as_ref(), "=", v.as_ref()].concat())
}
fn params_to_vec<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> Vec<String> {
Self::params_to_string_collection(params).collect()
}
fn params_to_query<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> String {
Self::params_to_vec(params).join("&")
}
}
#[cfg(all(test, feature = "nightly"))]
mod benches {
use super::*;
use test::Bencher;
#[bench]
fn bench_params_to_query(b: &mut Bencher) {
let params_str = (1..10).map(|i| (format!("unakey{}", i), format!("unvalor{}", i)))
.collect::<Vec<_>>();
let params = params_str.iter()
.map(|(k, v)| (k.as_str().into(), v.as_str()))
.collect::<Vec<(Cow<str>, &str)>>();
b.iter(|| Parameters::params_to_query(¶ms));
}
}
| method_requires_body | identifier_name |
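// A minimal usage sketch for the `Parameters` type in the row above, assuming
// it is in scope together with `http_types::Method`: GET-like methods carry
// parameters in the query string, all other methods carry them in the body.
fn parameters_usage() {
    use http_types::Method;
    use std::borrow::Cow;
    let params: Vec<(Cow<str>, &str)> = vec![(Cow::from("key"), "value")];
    let get = Parameters::new(&Method::GET, &params);
    assert_eq!(get.query(), Some("key=value"));
    assert_eq!(get.path_and_query("/search"), "/search?key=value");
    let post = Parameters::new(&Method::POST, &params);
    assert_eq!(post.body(), Some("key=value"));
}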
parameters.rs | use http_types::Method;
use std::borrow::Cow;
#[derive(Clone, Debug)]
pub enum Parameters {
Query(String),
Body(String),
}
use std::{
iter::Map,
slice::Iter,
};
// This type alias is currently only used internally here, but we might want to move it elsewhere,
// where it could be more useful thanks to its genericity. We could also aim at reducing the amount of
// conversions in requests by having a type that only maps parameters once unless changed.
type ParamsMapper<'a, 'p, S, T> = Map<Iter<'a, (Cow<'p, str>, S)>, fn(&(Cow<'p, str>, S)) -> T>;
impl Parameters {
pub fn new<S: AsRef<str>>(method: &Method, params: &[(Cow<str>, S)]) -> Self {
let params_s = Self::params_to_query(params);
if Self::method_requires_body(method) {
Parameters::Body(params_s)
} else {
Parameters::Query(params_s)
}
}
#[inline]
fn method_requires_body(method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE => false,
_ => true,
}
}
pub fn path_and_query<'p>(&self, path: &'p str) -> Cow<'p, str> {
self.query().map_or_else(|| Cow::Borrowed(path),
|q| {
let mut url = path.to_string();
url.push('?');
url.push_str(q);
Cow::Owned(url)
})
}
pub fn uri_and_body<'p>(&self, path: &'p str) -> (Cow<'p, str>, Option<&str>) {
(self.path_and_query(path), self.body())
}
pub fn query(&self) -> Option<&str> {
match self {
Parameters::Query(query) => Some(query.as_str()),
_ => None,
}
}
pub fn body(&self) -> Option<&str> {
match self {
Parameters::Body(body) => Some(body.as_str()),
_ => None,
}
}
pub fn into_inner(self) -> String {
match self {
Parameters::Query(s) => s,
Parameters::Body(s) => s,
}
}
pub fn as_mut_string(&mut self) -> &mut String {
match self {
Parameters::Query(ref mut query) => query,
Parameters::Body(ref mut body) => body,
}
}
pub fn query_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Query(ref mut query) => Some(query),
_ => None,
}
}
pub fn body_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Body(ref mut body) => Some(body),
_ => None,
}
}
pub fn push<S: AsRef<str>>(&mut self, extra_params: &[(Cow<str>, S)]) {
let q = Self::params_to_query(extra_params);
let s = self.as_mut_string();
if !s.is_empty() {
s.push('&');
}
s.push_str(q.as_str());
}
fn params_to_string_collection<'p, 'a: 'p, S: AsRef<str>>(params: &'a [(Cow<str>, S)])
-> ParamsMapper<'a, 'p, S, String> {
params.iter()
.map(|(k, v)| [k.as_ref().as_ref(), "=", v.as_ref()].concat())
}
fn params_to_vec<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> Vec<String> {
Self::params_to_string_collection(params).collect()
}
fn params_to_query<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> String {
Self::params_to_vec(params).join("&")
}
}
#[cfg(all(test, feature = "nightly"))]
mod benches {
use super::*;
use test::Bencher;
#[bench]
fn bench_params_to_query(b: &mut Bencher) |
}
| {
let params_str = (1..10).map(|i| (format!("unakey{}", i), format!("unvalor{}", i)))
.collect::<Vec<_>>();
let params = params_str.iter()
.map(|(k, v)| (k.as_str().into(), v.as_str()))
.collect::<Vec<(Cow<str>, &str)>>();
b.iter(|| Parameters::params_to_query(¶ms));
} | identifier_body |
parameters.rs | use http_types::Method;
use std::borrow::Cow;
#[derive(Clone, Debug)]
pub enum Parameters {
Query(String),
Body(String),
}
use std::{
iter::Map,
slice::Iter,
};
// This type alias is currently only used internally here, but we might want to move it elsewhere,
// where it could be more useful thanks to its genericity. We could also aim at reducing the amount of
// conversions in requests by having a type that only maps parameters once unless changed.
type ParamsMapper<'a, 'p, S, T> = Map<Iter<'a, (Cow<'p, str>, S)>, fn(&(Cow<'p, str>, S)) -> T>;
impl Parameters {
pub fn new<S: AsRef<str>>(method: &Method, params: &[(Cow<str>, S)]) -> Self {
let params_s = Self::params_to_query(params);
if Self::method_requires_body(method) {
Parameters::Body(params_s)
} else {
Parameters::Query(params_s)
}
}
#[inline]
fn method_requires_body(method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE => false,
_ => true,
}
}
pub fn path_and_query<'p>(&self, path: &'p str) -> Cow<'p, str> {
self.query().map_or_else(|| Cow::Borrowed(path),
|q| {
let mut url = path.to_string();
url.push('?');
url.push_str(q);
Cow::Owned(url)
})
}
pub fn uri_and_body<'p>(&self, path: &'p str) -> (Cow<'p, str>, Option<&str>) {
(self.path_and_query(path), self.body())
}
pub fn query(&self) -> Option<&str> {
match self {
Parameters::Query(query) => Some(query.as_str()),
_ => None,
}
}
pub fn body(&self) -> Option<&str> {
match self {
Parameters::Body(body) => Some(body.as_str()),
_ => None,
}
}
pub fn into_inner(self) -> String {
match self {
Parameters::Query(s) => s,
Parameters::Body(s) => s,
}
}
pub fn as_mut_string(&mut self) -> &mut String {
match self {
Parameters::Query(ref mut query) => query,
Parameters::Body(ref mut body) => body,
}
}
pub fn query_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Query(ref mut query) => Some(query),
_ => None,
}
}
pub fn body_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Body(ref mut body) => Some(body),
_ => None,
}
}
pub fn push<S: AsRef<str>>(&mut self, extra_params: &[(Cow<str>, S)]) {
let q = Self::params_to_query(extra_params);
let s = self.as_mut_string();
if !s.is_empty() {
s.push('&');
}
s.push_str(q.as_str());
}
fn params_to_string_collection<'p, 'a: 'p, S: AsRef<str>>(params: &'a [(Cow<str>, S)])
-> ParamsMapper<'a, 'p, S, String> {
params.iter()
.map(|(k, v)| [k.as_ref().as_ref(), "=", v.as_ref()].concat())
}
fn params_to_vec<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> Vec<String> { | fn params_to_query<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> String {
Self::params_to_vec(params).join("&")
}
}
#[cfg(all(test, feature = "nightly"))]
mod benches {
use super::*;
use test::Bencher;
#[bench]
fn bench_params_to_query(b: &mut Bencher) {
let params_str = (1..10).map(|i| (format!("unakey{}", i), format!("unvalor{}", i)))
.collect::<Vec<_>>();
let params = params_str.iter()
.map(|(k, v)| (k.as_str().into(), v.as_str()))
.collect::<Vec<(Cow<str>, &str)>>();
b.iter(|| Parameters::params_to_query(¶ms));
}
} | Self::params_to_string_collection(params).collect()
}
| random_line_split |
total_count.rs | use collectors::{Collector, DocumentMatch};
#[derive(Debug)]
pub struct TotalCountCollector {
total_count: u64,
}
impl TotalCountCollector {
pub fn new() -> TotalCountCollector {
TotalCountCollector {
total_count: 0,
}
}
pub fn get_total_count(&self) -> u64 {
self.total_count
}
}
impl Collector for TotalCountCollector {
fn needs_score(&self) -> bool {
false
} |
#[cfg(test)]
mod tests {
use collectors::{Collector, DocumentMatch};
use super::TotalCountCollector;
#[test]
fn test_total_count_collector_inital_state() {
let collector = TotalCountCollector::new();
assert_eq!(collector.get_total_count(), 0);
}
#[test]
fn test_total_count_collector_needs_score() {
let collector = TotalCountCollector::new();
assert_eq!(collector.needs_score(), false);
}
#[test]
fn test_total_count_collector_collect() {
let mut collector = TotalCountCollector::new();
collector.collect(DocumentMatch::new_unscored(0));
collector.collect(DocumentMatch::new_unscored(1));
collector.collect(DocumentMatch::new_unscored(2));
assert_eq!(collector.get_total_count(), 3);
}
} |
fn collect(&mut self, _doc: DocumentMatch) {
self.total_count += 1;
}
} | random_line_split |
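// A sketch of how such a collector might be driven by a search loop; the
// `count_matches` driver below is hypothetical, but `TotalCountCollector`,
// `Collector` and `DocumentMatch` are the types from the row above.
fn count_matches(matches: Vec<DocumentMatch>) -> u64 {
    let mut collector = TotalCountCollector::new();
    for doc in matches {
        collector.collect(doc);
    }
    collector.get_total_count()
}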
total_count.rs | use collectors::{Collector, DocumentMatch};
#[derive(Debug)]
pub struct TotalCountCollector {
total_count: u64,
}
impl TotalCountCollector {
pub fn new() -> TotalCountCollector {
TotalCountCollector {
total_count: 0,
}
}
pub fn get_total_count(&self) -> u64 {
self.total_count
}
}
impl Collector for TotalCountCollector {
fn needs_score(&self) -> bool {
false
}
fn collect(&mut self, _doc: DocumentMatch) {
self.total_count += 1;
}
}
#[cfg(test)]
mod tests {
use collectors::{Collector, DocumentMatch};
use super::TotalCountCollector;
#[test]
fn test_total_count_collector_inital_state() {
let collector = TotalCountCollector::new();
assert_eq!(collector.get_total_count(), 0);
}
#[test]
fn | () {
let collector = TotalCountCollector::new();
assert_eq!(collector.needs_score(), false);
}
#[test]
fn test_total_count_collector_collect() {
let mut collector = TotalCountCollector::new();
collector.collect(DocumentMatch::new_unscored(0));
collector.collect(DocumentMatch::new_unscored(1));
collector.collect(DocumentMatch::new_unscored(2));
assert_eq!(collector.get_total_count(), 3);
}
}
| test_total_count_collector_needs_score | identifier_name |
destructure-trait-ref.rs | // The regression test for #15031 to make sure destructuring trait
// references work properly.
#![feature(box_patterns)]
#![feature(box_syntax)]
trait T { fn foo(&self) | }
impl T for isize {}
fn main() {
// For an expression of the form:
//
// let &...&x = &..&SomeTrait;
//
// Say we have n `&` at the left hand and m `&` right hand, then:
// if n < m, we are golden;
// if n == m, it's a derefing non-derefable type error;
// if n > m, it's a type mismatch error.
// n < m
let &x = &(&1isize as &dyn T);
let &x = &&(&1isize as &dyn T);
let &&x = &&(&1isize as &dyn T);
// n == m
let &x = &1isize as &dyn T; //~ ERROR type `&dyn T` cannot be dereferenced
let &&x = &(&1isize as &dyn T); //~ ERROR type `&dyn T` cannot be dereferenced
let box x = box 1isize as Box<dyn T>;
//~^ ERROR type `Box<dyn T>` cannot be dereferenced
// n > m
let &&x = &1isize as &dyn T;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let &&&x = &(&1isize as &dyn T);
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let box box x = box 1isize as Box<dyn T>;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found struct `Box<_>`
}
| {} | identifier_body |
destructure-trait-ref.rs | // The regression test for #15031 to make sure destructuring trait
// references work properly.
#![feature(box_patterns)]
#![feature(box_syntax)]
trait T { fn | (&self) {} }
impl T for isize {}
fn main() {
// For an expression of the form:
//
// let &...&x = &..&SomeTrait;
//
// Say we have n `&` at the left hand and m `&` right hand, then:
// if n < m, we are golden;
// if n == m, it's a derefing non-derefable type error;
// if n > m, it's a type mismatch error.
// n < m
let &x = &(&1isize as &dyn T);
let &x = &&(&1isize as &dyn T);
let &&x = &&(&1isize as &dyn T);
// n == m
let &x = &1isize as &dyn T; //~ ERROR type `&dyn T` cannot be dereferenced
let &&x = &(&1isize as &dyn T); //~ ERROR type `&dyn T` cannot be dereferenced
let box x = box 1isize as Box<dyn T>;
//~^ ERROR type `Box<dyn T>` cannot be dereferenced
// n > m
let &&x = &1isize as &dyn T;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let &&&x = &(&1isize as &dyn T);
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let box box x = box 1isize as Box<dyn T>;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found struct `Box<_>`
}
| foo | identifier_name |
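// A compiling sketch of the "n < m" case from the comments above, assuming the
// same `trait T` and `impl T for isize` are in scope: the single `&` in the
// pattern peels one reference, binding `x` to a `&dyn T`.
fn n_less_than_m() {
    let &x = &(&1isize as &dyn T);
    x.foo();
}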
destructure-trait-ref.rs | // The regression test for #15031 to make sure destructuring trait
// references work properly.
#![feature(box_patterns)] | trait T { fn foo(&self) {} }
impl T for isize {}
fn main() {
// For an expression of the form:
//
// let &...&x = &..&SomeTrait;
//
// Say we have n `&` at the left hand and m `&` right hand, then:
// if n < m, we are golden;
// if n == m, it's a derefing non-derefable type error;
// if n > m, it's a type mismatch error.
// n < m
let &x = &(&1isize as &dyn T);
let &x = &&(&1isize as &dyn T);
let &&x = &&(&1isize as &dyn T);
// n == m
let &x = &1isize as &dyn T; //~ ERROR type `&dyn T` cannot be dereferenced
let &&x = &(&1isize as &dyn T); //~ ERROR type `&dyn T` cannot be dereferenced
let box x = box 1isize as Box<dyn T>;
//~^ ERROR type `Box<dyn T>` cannot be dereferenced
// n > m
let &&x = &1isize as &dyn T;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let &&&x = &(&1isize as &dyn T);
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let box box x = box 1isize as Box<dyn T>;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found struct `Box<_>`
} | #![feature(box_syntax)]
| random_line_split |
lib.rs | // Helianto -- static website generator
// Copyright © 2015-2016 Mickaël RAYBAUD-ROIG
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate chrono;
extern crate handlebars;
extern crate num;
extern crate pulldown_cmark;
extern crate regex;
extern crate serde;
extern crate toml;
extern crate walkdir;
#[macro_use]
extern crate log;
mod document;
mod error;
mod generators;
pub mod metadata;
pub mod readers;
mod settings;
mod site;
mod templates;
mod utils;
use handlebars::Handlebars;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use walkdir::{DirEntry, WalkDir};
pub use crate::document::{Document, DocumentContent, DocumentMetadata};
pub use crate::error::{Error, Result};
pub use crate::generators::Generator;
use crate::readers::Reader;
pub use crate::settings::Settings;
pub use crate::site::Site;
use crate::templates::Context;
pub struct Compiler {
pub settings: Settings,
pub site: Site,
handlebars: Handlebars<'static>,
readers: HashMap<String, Rc<dyn Reader>>,
generators: Vec<Rc<dyn Generator>>,
documents: HashMap<String, Rc<DocumentMetadata>>,
}
impl Compiler {
pub fn new(settings: &Settings) -> Compiler {
let mut compiler = Compiler {
settings: settings.clone(),
readers: HashMap::new(),
handlebars: Handlebars::new(),
site: Site::new(settings),
documents: HashMap::new(),
generators: Vec::new(),
};
compiler.add_reader::<readers::MarkdownReader>();
compiler.add_generator::<generators::IndexGenerator>();
compiler
}
fn check_settings(&self) -> Result<()> {
let Settings {
ref source_dir,
ref output_dir,
..
} = self.settings;
if !source_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be an existing directory", source_dir.display()),
});
}
if output_dir.exists() && !output_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be a directory", output_dir.display()),
});
}
Ok(())
}
pub fn get_reader(&self, path: &Path) -> Option<Rc<dyn Reader>> {
path.extension()
.and_then(|extension| extension.to_str())
.and_then(|extension_str| self.readers.get(extension_str))
.cloned()
}
pub fn add_reader<T: Reader + 'static>(&mut self) {
let reader = Rc::new(T::new(&self.settings));
for &extension in T::extensions() {
self.readers.insert(extension.into(), reader.clone());
}
}
pub fn add_generator<T: Generator + 'static>(&mut self) {
self.generators.push(Rc::new(T::new()));
}
fn load_templates(&mut self) -> Result<()> {
self.handlebars.clear_templates();
templates::register_helpers(&mut self.handlebars);
let loader = &mut templates::Loader::new(&mut self.handlebars);
loader.load_builtin_templates();
let templates_dir = self.settings.source_dir.join("_layouts");
if templates_dir.is_dir() {
loader.load_templates(&templates_dir);
}
Ok(())
}
fn render_context(&self, context: Context, path: &Path) -> Result<()> {
let output: String = self.handlebars.render("page.html", &context)
.map_err(|err| Error::Render {
cause: Box::new(err)
})?;
let dest_file = self.settings.output_dir.join(&path);
let dest_dir = dest_file.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| {
let mut fd = File::create(&dest_file)?;
fd.write(output.as_ref())?;
fd.sync_data()?;
Ok(())
})
.map_err(|err| Error::Output {
dest: dest_dir.into(),
cause: Box::new(err),
})
}
fn build_document(&mut self, reader: Rc<dyn Reader>, path: &Path) -> Result<()> {
let (body, metadata) = reader.load(path)?;
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| relpath.with_extension("html"))
.unwrap();
let document = Document {
metadata: DocumentMetadata {
url: dest.to_str().unwrap().into(),
.. DocumentMetadata::from_raw(metadata.into_iter())?
},
content: DocumentContent::from(body),
};
debug!(
"Rendering document {} in {}...",
path.display(),
dest.display()
);
self.render_context(Context::new(&self.site, &document), &dest)
.and_then(|_| {
self.documents.insert(
dest.to_str().unwrap().into(),
Rc::new(document.metadata.clone()),
);
Ok(())
})
}
fn copy_file(&mut self, path: &Path) -> Result<()> {
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| self.settings.output_dir.join(relpath))
.unwrap();
let dest_dir = dest.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| fs::copy(path, &dest))
.and_then(|_| {
debug!("Copying {} to {}", path.display(), dest.display());
Ok(())
})
.map_err(|err| Error::Copy {
from: path.into(),
to: dest_dir.into(),
cause: Box::new(err),
})
}
fn run_generators(&mut self) -> Result<()> {
let documents: Vec<Rc<DocumentMetadata>> = self.documents.values().cloned().collect();
for generator in self.generators.iter() {
let generated_docs = generator.generate(documents.as_ref())?;
for generated_doc in generated_docs.iter() {
if self.documents.contains_key(&generated_doc.metadata.url) {
continue;
}
trace!("Running generator");
let dest = utils::remove_path_prefix(&generated_doc.metadata.url);
if let Err(e) = self.render_context(Context::new(&self.site, generated_doc), &dest)
{
return Err(e);
}
}
}
Ok(())
}
pub fn ru | mut self) -> Result<()> {
self.check_settings()?;
self.load_templates()?;
let entries = WalkDir::new(&self.settings.source_dir)
.min_depth(1)
.max_depth(self.settings.max_depth)
.follow_links(self.settings.follow_links)
.into_iter();
for entry in entries.filter_entry(filter_entry) {
let entry = match entry {
Err(_) => continue,
Ok(e) => {
if e.file_type().is_dir() {
continue;
} else {
PathBuf::from(e.path())
}
}
};
let result = match self.get_reader(&entry) {
Some(reader) => self.build_document(reader.clone(), &entry),
None => self.copy_file(&entry),
};
if let Err(err) = result {
error!("{}", err);
}
}
self.run_generators()?;
Ok(())
}
}
pub fn filter_entry(entry: &DirEntry) -> bool {
let file_type = entry.file_type();
if file_type.is_dir() || file_type.is_file() {
utils::is_public(&entry.path())
} else {
false
}
}
| n(& | identifier_name |
lib.rs | // Helianto -- static website generator
// Copyright © 2015-2016 Mickaël RAYBAUD-ROIG
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate chrono;
extern crate handlebars;
extern crate num;
extern crate pulldown_cmark;
extern crate regex;
extern crate serde;
extern crate toml;
extern crate walkdir;
#[macro_use]
extern crate log;
mod document;
mod error;
mod generators;
pub mod metadata;
pub mod readers;
mod settings;
mod site;
mod templates;
mod utils;
use handlebars::Handlebars;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use walkdir::{DirEntry, WalkDir};
pub use crate::document::{Document, DocumentContent, DocumentMetadata};
pub use crate::error::{Error, Result};
pub use crate::generators::Generator;
use crate::readers::Reader;
pub use crate::settings::Settings;
pub use crate::site::Site;
use crate::templates::Context;
pub struct Compiler {
pub settings: Settings,
pub site: Site,
handlebars: Handlebars<'static>,
readers: HashMap<String, Rc<dyn Reader>>,
generators: Vec<Rc<dyn Generator>>,
documents: HashMap<String, Rc<DocumentMetadata>>,
}
impl Compiler {
pub fn new(settings: &Settings) -> Compiler {
let mut compiler = Compiler {
settings: settings.clone(),
readers: HashMap::new(),
handlebars: Handlebars::new(),
site: Site::new(settings),
documents: HashMap::new(),
generators: Vec::new(),
};
compiler.add_reader::<readers::MarkdownReader>();
compiler.add_generator::<generators::IndexGenerator>();
compiler
}
fn check_settings(&self) -> Result<()> {
let Settings {
ref source_dir,
ref output_dir,
..
} = self.settings;
if !source_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be an existing directory", source_dir.display()),
});
}
if output_dir.exists() && !output_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be a directory", output_dir.display()),
});
}
Ok(())
}
pub fn get_reader(&self, path: &Path) -> Option<Rc<dyn Reader>> {
path.extension()
.and_then(|extension| extension.to_str())
.and_then(|extension_str| self.readers.get(extension_str))
.cloned()
}
pub fn add_reader<T: Reader + 'static>(&mut self) {
let reader = Rc::new(T::new(&self.settings));
for &extension in T::extensions() {
self.readers.insert(extension.into(), reader.clone());
}
}
pub fn add_generator<T: Generator + 'static>(&mut self) {
self.generators.push(Rc::new(T::new()));
}
fn load_templates(&mut self) -> Result<()> {
self.handlebars.clear_templates();
templates::register_helpers(&mut self.handlebars);
let loader = &mut templates::Loader::new(&mut self.handlebars);
loader.load_builtin_templates();
let templates_dir = self.settings.source_dir.join("_layouts");
if templates_dir.is_dir() {
loader.load_templates(&templates_dir);
}
Ok(())
}
fn render_context(&self, context: Context, path: &Path) -> Result<()> {
let output: String = self.handlebars.render("page.html", &context)
.map_err(|err| Error::Render {
cause: Box::new(err)
})?;
let dest_file = self.settings.output_dir.join(&path);
let dest_dir = dest_file.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| {
let mut fd = File::create(&dest_file)?;
fd.write(output.as_ref())?;
fd.sync_data()?;
Ok(())
})
.map_err(|err| Error::Output {
dest: dest_dir.into(),
cause: Box::new(err),
})
}
fn build_document(&mut self, reader: Rc<dyn Reader>, path: &Path) -> Result<()> {
let (body, metadata) = reader.load(path)?;
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| relpath.with_extension("html"))
.unwrap();
let document = Document {
metadata: DocumentMetadata {
url: dest.to_str().unwrap().into(),
.. DocumentMetadata::from_raw(metadata.into_iter())?
},
content: DocumentContent::from(body),
};
debug!(
"Rendering document {} in {}...",
path.display(),
dest.display()
);
self.render_context(Context::new(&self.site, &document), &dest) | .and_then(|_| {
self.documents.insert(
dest.to_str().unwrap().into(),
Rc::new(document.metadata.clone()),
);
Ok(())
})
}
fn copy_file(&mut self, path: &Path) -> Result<()> {
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| self.settings.output_dir.join(relpath))
.unwrap();
let dest_dir = dest.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| fs::copy(path, &dest))
.and_then(|_| {
debug!("Copying {} to {}", path.display(), dest.display());
Ok(())
})
.map_err(|err| Error::Copy {
from: path.into(),
to: dest_dir.into(),
cause: Box::new(err),
})
}
fn run_generators(&mut self) -> Result<()> {
let documents: Vec<Rc<DocumentMetadata>> = self.documents.values().cloned().collect();
for generator in self.generators.iter() {
let generated_docs = generator.generate(documents.as_ref())?;
for generated_doc in generated_docs.iter() {
if self.documents.contains_key(&generated_doc.metadata.url) {
continue;
}
trace!("Running generator");
let dest = utils::remove_path_prefix(&generated_doc.metadata.url);
if let Err(e) = self.render_context(Context::new(&self.site, generated_doc), &dest)
{
return Err(e);
}
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
self.check_settings()?;
self.load_templates()?;
let entries = WalkDir::new(&self.settings.source_dir)
.min_depth(1)
.max_depth(self.settings.max_depth)
.follow_links(self.settings.follow_links)
.into_iter();
for entry in entries.filter_entry(filter_entry) {
let entry = match entry {
Err(_) => continue,
Ok(e) => {
if e.file_type().is_dir() {
continue;
} else {
PathBuf::from(e.path())
}
}
};
let result = match self.get_reader(&entry) {
Some(reader) => self.build_document(reader.clone(), &entry),
None => self.copy_file(&entry),
};
if let Err(err) = result {
error!("{}", err);
}
}
self.run_generators()?;
Ok(())
}
}
pub fn filter_entry(entry: &DirEntry) -> bool {
let file_type = entry.file_type();
if file_type.is_dir() || file_type.is_file() {
utils::is_public(&entry.path())
} else {
false
}
} | random_line_split |
|
lib.rs | // Helianto -- static website generator
// Copyright © 2015-2016 Mickaël RAYBAUD-ROIG
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate chrono;
extern crate handlebars;
extern crate num;
extern crate pulldown_cmark;
extern crate regex;
extern crate serde;
extern crate toml;
extern crate walkdir;
#[macro_use]
extern crate log;
mod document;
mod error;
mod generators;
pub mod metadata;
pub mod readers;
mod settings;
mod site;
mod templates;
mod utils;
use handlebars::Handlebars;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use walkdir::{DirEntry, WalkDir};
pub use crate::document::{Document, DocumentContent, DocumentMetadata};
pub use crate::error::{Error, Result};
pub use crate::generators::Generator;
use crate::readers::Reader;
pub use crate::settings::Settings;
pub use crate::site::Site;
use crate::templates::Context;
pub struct Compiler {
pub settings: Settings,
pub site: Site,
handlebars: Handlebars<'static>,
readers: HashMap<String, Rc<dyn Reader>>,
generators: Vec<Rc<dyn Generator>>,
documents: HashMap<String, Rc<DocumentMetadata>>,
}
impl Compiler {
pub fn new(settings: &Settings) -> Compiler {
let mut compiler = Compiler {
settings: settings.clone(),
readers: HashMap::new(),
handlebars: Handlebars::new(),
site: Site::new(settings),
documents: HashMap::new(),
generators: Vec::new(),
};
compiler.add_reader::<readers::MarkdownReader>();
compiler.add_generator::<generators::IndexGenerator>();
compiler
}
fn check_settings(&self) -> Result<()> {
let Settings {
ref source_dir,
ref output_dir,
..
} = self.settings;
if !source_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be an existing directory", source_dir.display()),
});
}
if output_dir.exists() && !output_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be a directory", output_dir.display()),
});
}
Ok(())
}
pub fn get_reader(&self, path: &Path) -> Option<Rc<dyn Reader>> {
path.extension()
.and_then(|extension| extension.to_str())
.and_then(|extension_str| self.readers.get(extension_str))
.cloned()
}
pub fn add_reader<T: Reader + 'static>(&mut self) {
| pub fn add_generator<T: Generator + 'static>(&mut self) {
self.generators.push(Rc::new(T::new()));
}
fn load_templates(&mut self) -> Result<()> {
self.handlebars.clear_templates();
templates::register_helpers(&mut self.handlebars);
let loader = &mut templates::Loader::new(&mut self.handlebars);
loader.load_builtin_templates();
let templates_dir = self.settings.source_dir.join("_layouts");
if templates_dir.is_dir() {
loader.load_templates(&templates_dir);
}
Ok(())
}
fn render_context(&self, context: Context, path: &Path) -> Result<()> {
let output: String = self.handlebars.render("page.html", &context)
.map_err(|err| Error::Render {
cause: Box::new(err)
})?;
let dest_file = self.settings.output_dir.join(&path);
let dest_dir = dest_file.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| {
let mut fd = File::create(&dest_file)?;
fd.write(output.as_ref())?;
fd.sync_data()?;
Ok(())
})
.map_err(|err| Error::Output {
dest: dest_dir.into(),
cause: Box::new(err),
})
}
fn build_document(&mut self, reader: Rc<dyn Reader>, path: &Path) -> Result<()> {
let (body, metadata) = reader.load(path)?;
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| relpath.with_extension("html"))
.unwrap();
let document = Document {
metadata: DocumentMetadata {
url: dest.to_str().unwrap().into(),
.. DocumentMetadata::from_raw(metadata.into_iter())?
},
content: DocumentContent::from(body),
};
debug!(
"Rendering document {} in {}...",
path.display(),
dest.display()
);
self.render_context(Context::new(&self.site, &document), &dest)
.and_then(|_| {
self.documents.insert(
dest.to_str().unwrap().into(),
Rc::new(document.metadata.clone()),
);
Ok(())
})
}
fn copy_file(&mut self, path: &Path) -> Result<()> {
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| self.settings.output_dir.join(relpath))
.unwrap();
let dest_dir = dest.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| fs::copy(path, &dest))
.and_then(|_| {
debug!("Copying {} to {}", path.display(), dest.display());
Ok(())
})
.map_err(|err| Error::Copy {
from: path.into(),
to: dest_dir.into(),
cause: Box::new(err),
})
}
fn run_generators(&mut self) -> Result<()> {
let documents: Vec<Rc<DocumentMetadata>> = self.documents.values().cloned().collect();
for generator in self.generators.iter() {
let generated_docs = generator.generate(documents.as_ref())?;
for generated_doc in generated_docs.iter() {
if self.documents.contains_key(&generated_doc.metadata.url) {
continue;
}
trace!("Running generator");
let dest = utils::remove_path_prefix(&generated_doc.metadata.url);
if let Err(e) = self.render_context(Context::new(&self.site, generated_doc), &dest)
{
return Err(e);
}
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
self.check_settings()?;
self.load_templates()?;
let entries = WalkDir::new(&self.settings.source_dir)
.min_depth(1)
.max_depth(self.settings.max_depth)
.follow_links(self.settings.follow_links)
.into_iter();
for entry in entries.filter_entry(filter_entry) {
let entry = match entry {
Err(_) => continue,
Ok(e) => {
if e.file_type().is_dir() {
continue;
} else {
PathBuf::from(e.path())
}
}
};
let result = match self.get_reader(&entry) {
Some(reader) => self.build_document(reader.clone(), &entry),
None => self.copy_file(&entry),
};
if let Err(err) = result {
error!("{}", err);
}
}
self.run_generators()?;
Ok(())
}
}
pub fn filter_entry(entry: &DirEntry) -> bool {
let file_type = entry.file_type();
if file_type.is_dir() || file_type.is_file() {
utils::is_public(&entry.path())
} else {
false
}
}
| let reader = Rc::new(T::new(&self.settings));
for &extension in T::extensions() {
self.readers.insert(extension.into(), reader.clone());
}
}
| identifier_body |
orphan.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Orphan checker: every impl either implements a trait defined in this
//! crate or pertains to a type defined in this crate.
use rustc::traits;
use rustc::ty::{self, TyCtxt};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir;
pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut orphan = OrphanChecker { tcx };
tcx.hir().krate().visit_all_item_likes(&mut orphan);
}
struct OrphanChecker<'cx, 'tcx: 'cx> {
tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> {
/// Checks exactly one impl for orphan rules and other such
/// restrictions. In this fn, it can happen that multiple errors
/// apply to a specific impl, so just return after reporting one
/// to prevent inundating the user with a bunch of similar error
/// reports.
fn visit_item(&mut self, item: &hir::Item) {
let def_id = self.tcx.hir().local_def_id(item.id);
// "Trait" impl
if let hir::ItemKind::Impl(.., Some(_), _, _) = item.node {
debug!("coherence2::orphan check: trait impl {}",
self.tcx.hir().node_to_string(item.id));
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
let cm = self.tcx.sess.source_map();
let sp = cm.def_span(item.span);
match traits::orphan_check(self.tcx, def_id) {
Ok(()) => {}
Err(traits::OrphanCheckErr::NoLocalInputType) => { | .span_label(sp, "impl doesn't use types inside crate")
.note("the impl does not reference any types defined in this crate")
.note("define and implement a trait or new type instead")
.emit();
return;
}
Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => {
struct_span_err!(self.tcx.sess,
sp,
E0210,
"type parameter `{}` must be used as the type parameter \
for some local type (e.g., `MyStruct<{}>`)",
param_ty,
param_ty)
.span_label(sp,
format!("type parameter `{}` must be used as the type \
parameter for some local type", param_ty))
.note("only traits defined in the current crate can be implemented \
for a type parameter")
.emit();
return;
}
}
// In addition to the above rules, we restrict impls of auto traits
// so that they can only be implemented on nominal types, such as structs,
// enums or foreign types. To see why this restriction exists, consider the
// following example (#22978). Imagine that crate A defines an auto trait
// `Foo` and a fn that operates on pairs of types:
//
// ```
// // Crate A
// auto trait Foo { }
// fn two_foos<A:Foo,B:Foo>(..) {
// one_foo::<(A,B)>(..)
// }
// fn one_foo<T:Foo>(..) {.. }
// ```
//
// This type-checks fine; in particular the fn
// `two_foos` is able to conclude that `(A,B):Foo`
// because `A:Foo` and `B:Foo`.
//
// Now imagine that crate B comes along and does the following:
//
// ```
// struct A { }
// struct B { }
// impl Foo for A { }
// impl Foo for B { }
// impl !Send for (A, B) { }
// ```
//
// This final impl is legal according to the orphan
// rules, but it invalidates the reasoning from
// `two_foos` above.
debug!("trait_ref={:?} trait_def_id={:?} trait_is_auto={}",
trait_ref,
trait_def_id,
self.tcx.trait_is_auto(trait_def_id));
if self.tcx.trait_is_auto(trait_def_id) &&
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
ty::Adt(self_def, _) => Some(self_def.did),
ty::Foreign(did) => Some(did),
_ => None,
};
let msg = match opt_self_def_id {
// We only want to permit nominal types, but not *all* nominal types.
// They must be local to the current crate, so that people
// can't do `unsafe impl Send for Rc<SomethingLocal>` or
// `impl !Send for Box<SomethingLocalAndSend>`.
Some(self_def_id) => {
if self_def_id.is_local() {
None
} else {
Some((
format!("cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type \
defined in the current crate",
self.tcx.item_path_str(trait_def_id)),
"can't implement cross-crate trait for type in another crate"
))
}
}
_ => {
Some((format!("cross-crate traits with a default impl, like `{}`, can \
only be implemented for a struct/enum type, not `{}`",
self.tcx.item_path_str(trait_def_id),
self_ty),
"can't implement cross-crate trait with a default impl for \
non-struct/enum type"))
}
};
if let Some((msg, label)) = msg {
struct_span_err!(self.tcx.sess, sp, E0321, "{}", msg)
.span_label(sp, label)
.emit();
return;
}
}
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
} | struct_span_err!(self.tcx.sess,
sp,
E0117,
"only traits defined in the current crate can be \
implemented for arbitrary types") | random_line_split |
orphan.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Orphan checker: every impl either implements a trait defined in this
//! crate or pertains to a type defined in this crate.
use rustc::traits;
use rustc::ty::{self, TyCtxt};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir;
pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) |
struct OrphanChecker<'cx, 'tcx: 'cx> {
tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> {
/// Checks exactly one impl for orphan rules and other such
/// restrictions. In this fn, it can happen that multiple errors
/// apply to a specific impl, so just return after reporting one
/// to prevent inundating the user with a bunch of similar error
/// reports.
fn visit_item(&mut self, item: &hir::Item) {
let def_id = self.tcx.hir().local_def_id(item.id);
// "Trait" impl
if let hir::ItemKind::Impl(.., Some(_), _, _) = item.node {
debug!("coherence2::orphan check: trait impl {}",
self.tcx.hir().node_to_string(item.id));
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
let cm = self.tcx.sess.source_map();
let sp = cm.def_span(item.span);
match traits::orphan_check(self.tcx, def_id) {
Ok(()) => {}
Err(traits::OrphanCheckErr::NoLocalInputType) => {
struct_span_err!(self.tcx.sess,
sp,
E0117,
"only traits defined in the current crate can be \
implemented for arbitrary types")
.span_label(sp, "impl doesn't use types inside crate")
.note("the impl does not reference any types defined in this crate")
.note("define and implement a trait or new type instead")
.emit();
return;
}
Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => {
struct_span_err!(self.tcx.sess,
sp,
E0210,
"type parameter `{}` must be used as the type parameter \
for some local type (e.g., `MyStruct<{}>`)",
param_ty,
param_ty)
.span_label(sp,
format!("type parameter `{}` must be used as the type \
parameter for some local type", param_ty))
.note("only traits defined in the current crate can be implemented \
for a type parameter")
.emit();
return;
}
}
// In addition to the above rules, we restrict impls of auto traits
// so that they can only be implemented on nominal types, such as structs,
// enums or foreign types. To see why this restriction exists, consider the
// following example (#22978). Imagine that crate A defines an auto trait
// `Foo` and a fn that operates on pairs of types:
//
// ```
// // Crate A
// auto trait Foo { }
// fn two_foos<A:Foo,B:Foo>(..) {
// one_foo::<(A,B)>(..)
// }
// fn one_foo<T:Foo>(..) {.. }
// ```
//
// This type-checks fine; in particular the fn
// `two_foos` is able to conclude that `(A,B):Foo`
// because `A:Foo` and `B:Foo`.
//
// Now imagine that crate B comes along and does the following:
//
// ```
// struct A { }
// struct B { }
// impl Foo for A { }
// impl Foo for B { }
// impl !Send for (A, B) { }
// ```
//
// This final impl is legal according to the orphan
// rules, but it invalidates the reasoning from
// `two_foos` above.
debug!("trait_ref={:?} trait_def_id={:?} trait_is_auto={}",
trait_ref,
trait_def_id,
self.tcx.trait_is_auto(trait_def_id));
if self.tcx.trait_is_auto(trait_def_id) &&
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
ty::Adt(self_def, _) => Some(self_def.did),
ty::Foreign(did) => Some(did),
_ => None,
};
let msg = match opt_self_def_id {
// We only want to permit nominal types, but not *all* nominal types.
// They must be local to the current crate, so that people
// can't do `unsafe impl Send for Rc<SomethingLocal>` or
// `impl !Send for Box<SomethingLocalAndSend>`.
Some(self_def_id) => {
if self_def_id.is_local() {
None
} else {
Some((
format!("cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type \
defined in the current crate",
self.tcx.item_path_str(trait_def_id)),
"can't implement cross-crate trait for type in another crate"
))
}
}
_ => {
Some((format!("cross-crate traits with a default impl, like `{}`, can \
only be implemented for a struct/enum type, not `{}`",
self.tcx.item_path_str(trait_def_id),
self_ty),
"can't implement cross-crate trait with a default impl for \
non-struct/enum type"))
}
};
if let Some((msg, label)) = msg {
struct_span_err!(self.tcx.sess, sp, E0321, "{}", msg)
.span_label(sp, label)
.emit();
return;
}
}
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
}
| {
let mut orphan = OrphanChecker { tcx };
tcx.hir().krate().visit_all_item_likes(&mut orphan);
} | identifier_body |
orphan.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Orphan checker: every impl either implements a trait defined in this
//! crate or pertains to a type defined in this crate.
use rustc::traits;
use rustc::ty::{self, TyCtxt};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir;
pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut orphan = OrphanChecker { tcx };
tcx.hir().krate().visit_all_item_likes(&mut orphan);
}
struct | <'cx, 'tcx: 'cx> {
tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> {
/// Checks exactly one impl for orphan rules and other such
/// restrictions. In this fn, it can happen that multiple errors
/// apply to a specific impl, so just return after reporting one
/// to prevent inundating the user with a bunch of similar error
/// reports.
fn visit_item(&mut self, item: &hir::Item) {
let def_id = self.tcx.hir().local_def_id(item.id);
// "Trait" impl
if let hir::ItemKind::Impl(.., Some(_), _, _) = item.node {
debug!("coherence2::orphan check: trait impl {}",
self.tcx.hir().node_to_string(item.id));
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
let cm = self.tcx.sess.source_map();
let sp = cm.def_span(item.span);
match traits::orphan_check(self.tcx, def_id) {
Ok(()) => {}
Err(traits::OrphanCheckErr::NoLocalInputType) => {
struct_span_err!(self.tcx.sess,
sp,
E0117,
"only traits defined in the current crate can be \
implemented for arbitrary types")
.span_label(sp, "impl doesn't use types inside crate")
.note("the impl does not reference any types defined in this crate")
.note("define and implement a trait or new type instead")
.emit();
return;
}
Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => {
struct_span_err!(self.tcx.sess,
sp,
E0210,
"type parameter `{}` must be used as the type parameter \
for some local type (e.g., `MyStruct<{}>`)",
param_ty,
param_ty)
.span_label(sp,
format!("type parameter `{}` must be used as the type \
parameter for some local type", param_ty))
.note("only traits defined in the current crate can be implemented \
for a type parameter")
.emit();
return;
}
}
// In addition to the above rules, we restrict impls of auto traits
// so that they can only be implemented on nominal types, such as structs,
// enums or foreign types. To see why this restriction exists, consider the
// following example (#22978). Imagine that crate A defines an auto trait
// `Foo` and a fn that operates on pairs of types:
//
// ```
// // Crate A
// auto trait Foo { }
// fn two_foos<A:Foo,B:Foo>(..) {
// one_foo::<(A,B)>(..)
// }
// fn one_foo<T:Foo>(..) {.. }
// ```
//
// This type-checks fine; in particular the fn
// `two_foos` is able to conclude that `(A,B):Foo`
// because `A:Foo` and `B:Foo`.
//
// Now imagine that crate B comes along and does the following:
//
// ```
// struct A { }
// struct B { }
// impl Foo for A { }
// impl Foo for B { }
// impl !Send for (A, B) { }
// ```
//
// This final impl is legal according to the orphan
// rules, but it invalidates the reasoning from
// `two_foos` above.
debug!("trait_ref={:?} trait_def_id={:?} trait_is_auto={}",
trait_ref,
trait_def_id,
self.tcx.trait_is_auto(trait_def_id));
if self.tcx.trait_is_auto(trait_def_id) &&
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
ty::Adt(self_def, _) => Some(self_def.did),
ty::Foreign(did) => Some(did),
_ => None,
};
let msg = match opt_self_def_id {
// We only want to permit nominal types, but not *all* nominal types.
// They must be local to the current crate, so that people
// can't do `unsafe impl Send for Rc<SomethingLocal>` or
// `impl !Send for Box<SomethingLocalAndSend>`.
Some(self_def_id) => {
if self_def_id.is_local() {
None
} else {
Some((
format!("cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type \
defined in the current crate",
self.tcx.item_path_str(trait_def_id)),
"can't implement cross-crate trait for type in another crate"
))
}
}
_ => {
Some((format!("cross-crate traits with a default impl, like `{}`, can \
only be implemented for a struct/enum type, not `{}`",
self.tcx.item_path_str(trait_def_id),
self_ty),
"can't implement cross-crate trait with a default impl for \
non-struct/enum type"))
}
};
if let Some((msg, label)) = msg {
struct_span_err!(self.tcx.sess, sp, E0321, "{}", msg)
.span_label(sp, label)
.emit();
return;
}
}
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
}
| OrphanChecker | identifier_name |
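// A sketch of the E0117 rule the checker above enforces. Implementing a
// foreign trait for a foreign type is rejected, but wrapping the foreign type
// in a local newtype (a common workaround) makes the impl legal:
//
// impl std::fmt::Display for Vec<u8> {} // error[E0117]: impl doesn't use types inside crate
struct Wrapper(Vec<u8>);
impl std::fmt::Display for Wrapper {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{} bytes", self.0.len())
    }
}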
cast_sign_loss.rs | use clippy_utils::consts::{constant, Constant};
use clippy_utils::diagnostics::span_lint;
use clippy_utils::{method_chain_args, sext};
use if_chain::if_chain;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::CAST_SIGN_LOSS;
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if should_lint(cx, cast_op, cast_from, cast_to) {
span_lint(
cx,
CAST_SIGN_LOSS,
expr.span,
&format!(
"casting `{}` to `{}` may lose the sign of the value",
cast_from, cast_to
),
);
}
}
| return false;
}
// Don't lint for positive constants.
let const_val = constant(cx, cx.typeck_results(), cast_op);
if_chain! {
if let Some((Constant::Int(n), _)) = const_val;
if let ty::Int(ity) = *cast_from.kind();
if sext(cx.tcx, n, ity) >= 0;
then {
return false;
}
}
// Don't lint for the result of methods that always return non-negative values.
if let ExprKind::MethodCall(path, _, _, _) = cast_op.kind {
let mut method_name = path.ident.name.as_str();
let allowed_methods = ["abs", "checked_abs", "rem_euclid", "checked_rem_euclid"];
if_chain! {
if method_name == "unwrap";
if let Some(arglist) = method_chain_args(cast_op, &["unwrap"]);
if let ExprKind::MethodCall(inner_path, _, _, _) = &arglist[0][0].kind;
then {
method_name = inner_path.ident.name.as_str();
}
}
if allowed_methods.iter().any(|&name| method_name == name) {
return false;
}
}
true
},
(false, true) => !cast_to.is_signed(),
(_, _) => false,
}
} | fn should_lint(cx: &LateContext<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool {
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
if !cast_from.is_signed() || cast_to.is_signed() { | random_line_split |
cast_sign_loss.rs | use clippy_utils::consts::{constant, Constant};
use clippy_utils::diagnostics::span_lint;
use clippy_utils::{method_chain_args, sext};
use if_chain::if_chain;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::CAST_SIGN_LOSS;
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if should_lint(cx, cast_op, cast_from, cast_to) {
span_lint(
cx,
CAST_SIGN_LOSS,
expr.span,
&format!(
"casting `{}` to `{}` may lose the sign of the value",
cast_from, cast_to
),
);
}
}
fn should_lint(cx: &LateContext<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool {
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
if !cast_from.is_signed() || cast_to.is_signed() {
return false;
}
// Don't lint for positive constants.
let const_val = constant(cx, cx.typeck_results(), cast_op);
if_chain! {
if let Some((Constant::Int(n), _)) = const_val;
if let ty::Int(ity) = *cast_from.kind();
if sext(cx.tcx, n, ity) >= 0;
then {
return false;
}
}
// Don't lint for the result of methods that always return non-negative values.
if let ExprKind::MethodCall(path, _, _, _) = cast_op.kind |
true
},
(false, true) => !cast_to.is_signed(),
(_, _) => false,
}
}
| {
let mut method_name = path.ident.name.as_str();
let allowed_methods = ["abs", "checked_abs", "rem_euclid", "checked_rem_euclid"];
if_chain! {
if method_name == "unwrap";
if let Some(arglist) = method_chain_args(cast_op, &["unwrap"]);
if let ExprKind::MethodCall(inner_path, _, _, _) = &arglist[0][0].kind;
then {
method_name = inner_path.ident.name.as_str();
}
}
if allowed_methods.iter().any(|&name| method_name == name) {
return false;
}
} | conditional_block |
cast_sign_loss.rs | use clippy_utils::consts::{constant, Constant};
use clippy_utils::diagnostics::span_lint;
use clippy_utils::{method_chain_args, sext};
use if_chain::if_chain;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::CAST_SIGN_LOSS;
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if should_lint(cx, cast_op, cast_from, cast_to) {
span_lint(
cx,
CAST_SIGN_LOSS,
expr.span,
&format!(
"casting `{}` to `{}` may lose the sign of the value",
cast_from, cast_to
),
);
}
}
fn | (cx: &LateContext<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool {
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
if !cast_from.is_signed() || cast_to.is_signed() {
return false;
}
// Don't lint for positive constants.
let const_val = constant(cx, cx.typeck_results(), cast_op);
if_chain! {
if let Some((Constant::Int(n), _)) = const_val;
if let ty::Int(ity) = *cast_from.kind();
if sext(cx.tcx, n, ity) >= 0;
then {
return false;
}
}
// Don't lint for the result of methods that always return non-negative values.
if let ExprKind::MethodCall(path, _, _, _) = cast_op.kind {
let mut method_name = path.ident.name.as_str();
let allowed_methods = ["abs", "checked_abs", "rem_euclid", "checked_rem_euclid"];
if_chain! {
if method_name == "unwrap";
if let Some(arglist) = method_chain_args(cast_op, &["unwrap"]);
if let ExprKind::MethodCall(inner_path, _, _, _) = &arglist[0][0].kind;
then {
method_name = inner_path.ident.name.as_str();
}
}
if allowed_methods.iter().any(|&name| method_name == name) {
return false;
}
}
true
},
(false, true) => !cast_to.is_signed(),
(_, _) => false,
}
}
| should_lint | identifier_name |
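// A sketch of expressions the lint above flags versus accepts; the variable
// names are illustrative only.
fn cast_sign_loss_examples() {
    let x: i32 = -5;
    let _flagged = x as u32;              // linted: the sign of `x` may be lost
    let _ok_abs = x.abs() as u32;         // accepted: `abs` is in the allowed-methods list
    let _ok_rem = x.rem_euclid(3) as u32; // accepted: `rem_euclid` never returns a negative here
    let _ok_const = 7i32 as u32;          // accepted: provably non-negative constant
}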
cast_sign_loss.rs | use clippy_utils::consts::{constant, Constant};
use clippy_utils::diagnostics::span_lint;
use clippy_utils::{method_chain_args, sext};
use if_chain::if_chain;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::CAST_SIGN_LOSS;
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if should_lint(cx, cast_op, cast_from, cast_to) {
span_lint(
cx,
CAST_SIGN_LOSS,
expr.span,
&format!(
"casting `{}` to `{}` may lose the sign of the value",
cast_from, cast_to
),
);
}
}
fn should_lint(cx: &LateContext<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool | let mut method_name = path.ident.name.as_str();
let allowed_methods = ["abs", "checked_abs", "rem_euclid", "checked_rem_euclid"];
if_chain! {
if method_name == "unwrap";
if let Some(arglist) = method_chain_args(cast_op, &["unwrap"]);
if let ExprKind::MethodCall(inner_path, _, _, _) = &arglist[0][0].kind;
then {
method_name = inner_path.ident.name.as_str();
}
}
if allowed_methods.iter().any(|&name| method_name == name) {
return false;
}
}
true
},
(false, true) =>!cast_to.is_signed(),
(_, _) => false,
}
}
| {
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
if !cast_from.is_signed() || cast_to.is_signed() {
return false;
}
// Don't lint for positive constants.
let const_val = constant(cx, cx.typeck_results(), cast_op);
if_chain! {
if let Some((Constant::Int(n), _)) = const_val;
if let ty::Int(ity) = *cast_from.kind();
if sext(cx.tcx, n, ity) >= 0;
then {
return false;
}
}
// Don't lint for the result of methods that always return non-negative values.
if let ExprKind::MethodCall(path, _, _, _) = cast_op.kind { | identifier_body |
history.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HistoryBinding;
use dom::bindings::codegen::Bindings::HistoryBinding::HistoryMethods;
use dom::bindings::codegen::Bindings::LocationBinding::LocationMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::window::Window;
use ipc_channel::ipc;
use msg::constellation_msg::TraversalDirection;
use script_traits::ScriptMsg as ConstellationMsg;
// https://html.spec.whatwg.org/multipage/#the-history-interface
#[dom_struct]
pub struct History {
reflector_: Reflector,
window: JS<Window>,
}
impl History {
pub fn new_inherited(window: &Window) -> History {
History {
reflector_: Reflector::new(),
window: JS::from_ref(&window),
}
}
pub fn new(window: &Window) -> Root<History> |
}
impl History {
fn traverse_history(&self, direction: TraversalDirection) {
let pipeline = self.window.pipeline_id();
let msg = ConstellationMsg::TraverseHistory(Some(pipeline), direction);
let _ = self.window.constellation_chan().send(msg);
}
}
impl HistoryMethods for History {
// https://html.spec.whatwg.org/multipage/#dom-history-length
fn Length(&self) -> u32 {
let pipeline = self.window.pipeline_id();
let (sender, recv) = ipc::channel().expect("Failed to create channel to send jsh length.");
let msg = ConstellationMsg::JointSessionHistoryLength(pipeline, sender);
let _ = self.window.constellation_chan().send(msg);
recv.recv().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-history-go
fn Go(&self, delta: i32) {
let direction = if delta > 0 {
TraversalDirection::Forward(delta as usize)
} else if delta < 0 {
TraversalDirection::Back(-delta as usize)
} else {
self.window.Location().Reload();
return;
};
self.traverse_history(direction);
}
// https://html.spec.whatwg.org/multipage/#dom-history-back
fn Back(&self) {
self.traverse_history(TraversalDirection::Back(1));
}
// https://html.spec.whatwg.org/multipage/#dom-history-forward
fn Forward(&self) {
self.traverse_history(TraversalDirection::Forward(1));
}
}
| {
reflect_dom_object(box History::new_inherited(window),
GlobalRef::Window(window),
HistoryBinding::Wrap)
} | identifier_body |
history.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HistoryBinding;
use dom::bindings::codegen::Bindings::HistoryBinding::HistoryMethods;
use dom::bindings::codegen::Bindings::LocationBinding::LocationMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::window::Window;
use ipc_channel::ipc;
use msg::constellation_msg::TraversalDirection;
use script_traits::ScriptMsg as ConstellationMsg;
// https://html.spec.whatwg.org/multipage/#the-history-interface
#[dom_struct]
pub struct History {
reflector_: Reflector,
window: JS<Window>,
}
impl History {
pub fn new_inherited(window: &Window) -> History {
History {
reflector_: Reflector::new(),
window: JS::from_ref(&window),
}
}
pub fn new(window: &Window) -> Root<History> {
reflect_dom_object(box History::new_inherited(window),
GlobalRef::Window(window),
HistoryBinding::Wrap)
}
}
impl History {
fn traverse_history(&self, direction: TraversalDirection) {
let pipeline = self.window.pipeline_id();
let msg = ConstellationMsg::TraverseHistory(Some(pipeline), direction);
let _ = self.window.constellation_chan().send(msg);
}
}
impl HistoryMethods for History {
// https://html.spec.whatwg.org/multipage/#dom-history-length
fn Length(&self) -> u32 {
let pipeline = self.window.pipeline_id();
let (sender, recv) = ipc::channel().expect("Failed to create channel to send jsh length.");
let msg = ConstellationMsg::JointSessionHistoryLength(pipeline, sender);
let _ = self.window.constellation_chan().send(msg);
recv.recv().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-history-go
fn Go(&self, delta: i32) {
let direction = if delta > 0 {
TraversalDirection::Forward(delta as usize)
} else if delta < 0 | else {
self.window.Location().Reload();
return;
};
self.traverse_history(direction);
}
// https://html.spec.whatwg.org/multipage/#dom-history-back
fn Back(&self) {
self.traverse_history(TraversalDirection::Back(1));
}
// https://html.spec.whatwg.org/multipage/#dom-history-forward
fn Forward(&self) {
self.traverse_history(TraversalDirection::Forward(1));
}
}
| {
TraversalDirection::Back(-delta as usize)
} | conditional_block |
history.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HistoryBinding;
use dom::bindings::codegen::Bindings::HistoryBinding::HistoryMethods;
use dom::bindings::codegen::Bindings::LocationBinding::LocationMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::window::Window;
use ipc_channel::ipc;
use msg::constellation_msg::TraversalDirection;
use script_traits::ScriptMsg as ConstellationMsg;
// https://html.spec.whatwg.org/multipage/#the-history-interface
#[dom_struct]
pub struct History {
reflector_: Reflector,
window: JS<Window>,
}
impl History {
pub fn new_inherited(window: &Window) -> History {
History {
reflector_: Reflector::new(),
window: JS::from_ref(&window),
}
}
pub fn new(window: &Window) -> Root<History> {
reflect_dom_object(box History::new_inherited(window),
GlobalRef::Window(window),
HistoryBinding::Wrap)
}
}
impl History {
fn traverse_history(&self, direction: TraversalDirection) {
let pipeline = self.window.pipeline_id();
let msg = ConstellationMsg::TraverseHistory(Some(pipeline), direction);
let _ = self.window.constellation_chan().send(msg); | }
impl HistoryMethods for History {
// https://html.spec.whatwg.org/multipage/#dom-history-length
fn Length(&self) -> u32 {
let pipeline = self.window.pipeline_id();
let (sender, recv) = ipc::channel().expect("Failed to create channel to send jsh length.");
let msg = ConstellationMsg::JointSessionHistoryLength(pipeline, sender);
let _ = self.window.constellation_chan().send(msg);
recv.recv().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-history-go
fn Go(&self, delta: i32) {
let direction = if delta > 0 {
TraversalDirection::Forward(delta as usize)
} else if delta < 0 {
TraversalDirection::Back(-delta as usize)
} else {
self.window.Location().Reload();
return;
};
self.traverse_history(direction);
}
// https://html.spec.whatwg.org/multipage/#dom-history-back
fn Back(&self) {
self.traverse_history(TraversalDirection::Back(1));
}
// https://html.spec.whatwg.org/multipage/#dom-history-forward
fn Forward(&self) {
self.traverse_history(TraversalDirection::Forward(1));
}
} | } | random_line_split |
history.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HistoryBinding;
use dom::bindings::codegen::Bindings::HistoryBinding::HistoryMethods;
use dom::bindings::codegen::Bindings::LocationBinding::LocationMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::window::Window;
use ipc_channel::ipc;
use msg::constellation_msg::TraversalDirection;
use script_traits::ScriptMsg as ConstellationMsg;
// https://html.spec.whatwg.org/multipage/#the-history-interface
#[dom_struct]
pub struct | {
reflector_: Reflector,
window: JS<Window>,
}
impl History {
pub fn new_inherited(window: &Window) -> History {
History {
reflector_: Reflector::new(),
window: JS::from_ref(&window),
}
}
pub fn new(window: &Window) -> Root<History> {
reflect_dom_object(box History::new_inherited(window),
GlobalRef::Window(window),
HistoryBinding::Wrap)
}
}
impl History {
fn traverse_history(&self, direction: TraversalDirection) {
let pipeline = self.window.pipeline_id();
let msg = ConstellationMsg::TraverseHistory(Some(pipeline), direction);
let _ = self.window.constellation_chan().send(msg);
}
}
impl HistoryMethods for History {
// https://html.spec.whatwg.org/multipage/#dom-history-length
fn Length(&self) -> u32 {
let pipeline = self.window.pipeline_id();
let (sender, recv) = ipc::channel().expect("Failed to create channel to send jsh length.");
let msg = ConstellationMsg::JointSessionHistoryLength(pipeline, sender);
let _ = self.window.constellation_chan().send(msg);
recv.recv().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-history-go
fn Go(&self, delta: i32) {
let direction = if delta > 0 {
TraversalDirection::Forward(delta as usize)
} else if delta < 0 {
TraversalDirection::Back(-delta as usize)
} else {
self.window.Location().Reload();
return;
};
self.traverse_history(direction);
}
// https://html.spec.whatwg.org/multipage/#dom-history-back
fn Back(&self) {
self.traverse_history(TraversalDirection::Back(1));
}
// https://html.spec.whatwg.org/multipage/#dom-history-forward
fn Forward(&self) {
self.traverse_history(TraversalDirection::Forward(1));
}
}
| History | identifier_name |
merkletree.rs | use services::ledger::merkletree::tree::{ Tree, LeavesIterator, LeavesIntoIterator, TreeLeafData };
use services::ledger::merkletree::proof::{ Proof, Lemma };
use utils::crypto::hash::{Hash, HASH_OUTPUT_LEN};
use errors::crypto::CryptoError;
/// A Merkle tree is a binary tree, with values of type `T` at the leafs,
/// and where every internal node holds the hash of the concatenation of the hashes of its children nodes.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MerkleTree {
/// The root of the inner binary tree
pub root: Tree,
/// The height of the tree
pub height: usize,
/// The number of leaf nodes in the tree
pub count: usize,
/// The number of nodes in the tree
pub nodes_count: usize
}
impl MerkleTree {
/// Constructs a Merkle Tree from a vector of data blocks.
/// Returns an empty tree if `values` is empty.
pub fn from_vec(values: Vec<TreeLeafData>) -> Result<Self, CryptoError> {
if values.is_empty() {
return Ok(MerkleTree {
root: Tree::empty(Hash::hash_empty()?),
height: 0,
count: 0,
nodes_count: 0
});
}
let count = values.len();
let mut nodes_count = 0;
let mut height = 0;
let mut cur = Vec::with_capacity(count);
for v in values {
let leaf = Tree::new_leaf(v)?;
cur.push(leaf);
}
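// Hash adjacent pairs into parent nodes level by level; a lone trailing node
// is carried up unchanged, so the leaf count need not be a power of two.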
while cur.len() > 1 {
let mut next = Vec::new();
while !cur.is_empty() {
if cur.len() == 1 {
next.push(cur.remove(0));
}
else {
let left = cur.remove(0);
let right = cur.remove(0);
let combined_hash = Hash::hash_nodes(
left.hash(),
right.hash()
)?;
let node = Tree::Node {
hash: combined_hash.to_vec(),
left: Box::new(left),
right: Box::new(right)
};
next.push(node);
nodes_count += 1;
}
}
height += 1; | }
debug_assert!(cur.len() == 1);
let root = cur.remove(0);
Ok(MerkleTree {
root: root,
height: height,
count: count,
nodes_count: nodes_count
})
}
/// Returns the root hash of Merkle tree
pub fn root_hash(&self) -> &Vec<u8> {
self.root.hash()
}
/// Returns the hex root hash of Merkle tree
pub fn root_hash_hex(&self) -> String {
let rh = self.root.hash();
let mut ret: String = String::with_capacity(HASH_OUTPUT_LEN * 2);
for i in rh {
ret.push_str(&format!("{:02x}", i));
}
return ret;
}
/// Returns the height of Merkle tree
pub fn height(&self) -> usize {
self.height
}
/// Returns the number of leaves in the Merkle tree
pub fn count(&self) -> usize {
self.count
}
/// Returns whether the Merkle tree is empty or not
pub fn is_empty(&self) -> bool {
self.count() == 0
}
/// Generate an inclusion proof for the given value.
/// Returns `None` if the given value is not found in the tree.
pub fn gen_proof(&self, value: TreeLeafData) -> Result<Option<Proof>, CryptoError> {
let root_hash = self.root_hash().clone();
let leaf_hash = Hash::hash_leaf(&value)?;
Ok(Lemma::new(&self.root, leaf_hash.to_vec().as_slice()).map(|lemma|
Proof::new(root_hash, lemma, value)
))
}
/// Creates an `Iterator` over the values contained in this Merkle tree.
pub fn iter(&self) -> LeavesIterator {
self.root.iter()
}
}
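// Usage sketch (hypothetical leaf values; assumes TreeLeafData is a byte vector):
// let tree = MerkleTree::from_vec(vec![b"a".to_vec(), b"b".to_vec()])?;
// assert_eq!(tree.count(), 2);
// let proof = tree.gen_proof(b"a".to_vec())?; // Some(proof) when the leaf exists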
impl IntoIterator for MerkleTree {
type Item = TreeLeafData;
type IntoIter = LeavesIntoIterator;
/// Creates a consuming iterator, that is, one that moves each value out of the Merkle tree.
/// The tree cannot be used after calling this.
fn into_iter(self) -> Self::IntoIter {
self.root.into_iter()
}
}
impl <'a> IntoIterator for &'a MerkleTree {
type Item = &'a TreeLeafData;
type IntoIter = LeavesIterator<'a>;
/// Creates a borrowing `Iterator` over the values contained in this Merkle tree.
fn into_iter(self) -> Self::IntoIter {
self.root.iter()
}
} |
cur = next; | random_line_split |
merkletree.rs | use services::ledger::merkletree::tree::{ Tree, LeavesIterator, LeavesIntoIterator, TreeLeafData };
use services::ledger::merkletree::proof::{ Proof, Lemma };
use utils::crypto::hash::{Hash, HASH_OUTPUT_LEN};
use errors::crypto::CryptoError;
/// A Merkle tree is a binary tree, with values of type `T` at the leafs,
/// and where every internal node holds the hash of the concatenation of the hashes of its children nodes.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MerkleTree {
/// The root of the inner binary tree
pub root: Tree,
/// The height of the tree
pub height: usize,
/// The number of leaf nodes in the tree
pub count: usize,
/// The number of nodes in the tree
pub nodes_count: usize
}
impl MerkleTree {
/// Constructs a Merkle Tree from a vector of data blocks.
/// Returns an empty tree if `values` is empty.
pub fn from_vec(values: Vec<TreeLeafData>) -> Result<Self, CryptoError> {
if values.is_empty() {
return Ok(MerkleTree {
root: Tree::empty(Hash::hash_empty()?),
height: 0,
count: 0,
nodes_count: 0
});
}
let count = values.len();
let mut nodes_count = 0;
let mut height = 0;
let mut cur = Vec::with_capacity(count);
for v in values {
let leaf = Tree::new_leaf(v)?;
cur.push(leaf);
}
while cur.len() > 1 {
let mut next = Vec::new();
while !cur.is_empty() {
if cur.len() == 1 {
next.push(cur.remove(0));
}
else {
let left = cur.remove(0);
let right = cur.remove(0);
let combined_hash = Hash::hash_nodes(
left.hash(),
right.hash()
)?;
let node = Tree::Node {
hash: combined_hash.to_vec(),
left: Box::new(left),
right: Box::new(right)
};
next.push(node);
nodes_count += 1;
}
}
height += 1;
cur = next;
}
debug_assert!(cur.len() == 1);
let root = cur.remove(0);
Ok(MerkleTree {
root: root,
height: height,
count: count,
nodes_count: nodes_count
})
}
/// Returns the root hash of Merkle tree
pub fn root_hash(&self) -> &Vec<u8> {
self.root.hash()
}
/// Returns the hex root hash of Merkle tree
pub fn root_hash_hex(&self) -> String {
let rh = self.root.hash();
let mut ret: String = String::with_capacity(HASH_OUTPUT_LEN * 2);
for i in rh {
ret.push_str(&format!("{:02x}", i));
}
return ret;
}
/// Returns the height of Merkle tree
pub fn height(&self) -> usize {
self.height
}
/// Returns the number of leaves in the Merkle tree
pub fn count(&self) -> usize {
self.count
}
/// Returns whether the Merkle tree is empty or not
pub fn is_empty(&self) -> bool {
self.count() == 0
}
/// Generate an inclusion proof for the given value.
/// Returns `None` if the given value is not found in the tree.
pub fn gen_proof(&self, value: TreeLeafData) -> Result<Option<Proof>, CryptoError> {
let root_hash = self.root_hash().clone();
let leaf_hash = Hash::hash_leaf(&value)?;
Ok(Lemma::new(&self.root, leaf_hash.to_vec().as_slice()).map(|lemma|
Proof::new(root_hash, lemma, value)
))
}
/// Creates an `Iterator` over the values contained in this Merkle tree.
pub fn iter(&self) -> LeavesIterator {
self.root.iter()
}
}
impl IntoIterator for MerkleTree {
type Item = TreeLeafData;
type IntoIter = LeavesIntoIterator;
/// Creates a consuming iterator, that is, one that moves each value out of the Merkle tree.
/// The tree cannot be used after calling this.
fn into_iter(self) -> Self::IntoIter {
self.root.into_iter()
}
}
impl <'a> IntoIterator for &'a MerkleTree {
type Item = &'a TreeLeafData;
type IntoIter = LeavesIterator<'a>;
/// Creates a borrowing `Iterator` over the values contained in this Merkle tree.
fn | (self) -> Self::IntoIter {
self.root.iter()
}
}
| into_iter | identifier_name |
merkletree.rs | use services::ledger::merkletree::tree::{ Tree, LeavesIterator, LeavesIntoIterator, TreeLeafData };
use services::ledger::merkletree::proof::{ Proof, Lemma };
use utils::crypto::hash::{Hash, HASH_OUTPUT_LEN};
use errors::crypto::CryptoError;
/// A Merkle tree is a binary tree, with values of type `T` at the leafs,
/// and where every internal node holds the hash of the concatenation of the hashes of its children nodes.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MerkleTree {
/// The root of the inner binary tree
pub root: Tree,
/// The height of the tree
pub height: usize,
/// The number of leaf nodes in the tree
pub count: usize,
/// The number of nodes in the tree
pub nodes_count: usize
}
impl MerkleTree {
/// Constructs a Merkle Tree from a vector of data blocks.
/// Returns an empty tree if `values` is empty.
pub fn from_vec(values: Vec<TreeLeafData>) -> Result<Self, CryptoError> {
if values.is_empty() |
let count = values.len();
let mut nodes_count = 0;
let mut height = 0;
let mut cur = Vec::with_capacity(count);
for v in values {
let leaf = Tree::new_leaf(v)?;
cur.push(leaf);
}
while cur.len() > 1 {
let mut next = Vec::new();
while !cur.is_empty() {
if cur.len() == 1 {
next.push(cur.remove(0));
}
else {
let left = cur.remove(0);
let right = cur.remove(0);
let combined_hash = Hash::hash_nodes(
left.hash(),
right.hash()
)?;
let node = Tree::Node {
hash: combined_hash.to_vec(),
left: Box::new(left),
right: Box::new(right)
};
next.push(node);
nodes_count += 1;
}
}
height += 1;
cur = next;
}
debug_assert!(cur.len() == 1);
let root = cur.remove(0);
Ok(MerkleTree {
root: root,
height: height,
count: count,
nodes_count: nodes_count
})
}
/// Returns the root hash of Merkle tree
pub fn root_hash(&self) -> &Vec<u8> {
self.root.hash()
}
/// Returns the hex root hash of Merkle tree
pub fn root_hash_hex(&self) -> String {
let rh = self.root.hash();
let mut ret: String = String::with_capacity(HASH_OUTPUT_LEN * 2);
for i in rh {
ret.push_str(&format!("{:02x}", i));
}
return ret;
}
/// Returns the height of Merkle tree
pub fn height(&self) -> usize {
self.height
}
/// Returns the number of leaves in the Merkle tree
pub fn count(&self) -> usize {
self.count
}
/// Returns whether the Merkle tree is empty or not
pub fn is_empty(&self) -> bool {
self.count() == 0
}
/// Generate an inclusion proof for the given value.
/// Returns `None` if the given value is not found in the tree.
pub fn gen_proof(&self, value: TreeLeafData) -> Result<Option<Proof>, CryptoError> {
let root_hash = self.root_hash().clone();
let leaf_hash = Hash::hash_leaf(&value)?;
Ok(Lemma::new(&self.root, leaf_hash.to_vec().as_slice()).map(|lemma|
Proof::new(root_hash, lemma, value)
))
}
/// Creates an `Iterator` over the values contained in this Merkle tree.
pub fn iter(&self) -> LeavesIterator {
self.root.iter()
}
}
impl IntoIterator for MerkleTree {
type Item = TreeLeafData;
type IntoIter = LeavesIntoIterator;
/// Creates a consuming iterator, that is, one that moves each value out of the Merkle tree.
/// The tree cannot be used after calling this.
fn into_iter(self) -> Self::IntoIter {
self.root.into_iter()
}
}
impl <'a> IntoIterator for &'a MerkleTree {
type Item = &'a TreeLeafData;
type IntoIter = LeavesIterator<'a>;
/// Creates a borrowing `Iterator` over the values contained in this Merkle tree.
fn into_iter(self) -> Self::IntoIter {
self.root.iter()
}
}
| {
return Ok(MerkleTree {
root: Tree::empty(Hash::hash_empty()?),
height: 0,
count: 0,
nodes_count: 0
});
} | conditional_block |
merkletree.rs | use services::ledger::merkletree::tree::{ Tree, LeavesIterator, LeavesIntoIterator, TreeLeafData };
use services::ledger::merkletree::proof::{ Proof, Lemma };
use utils::crypto::hash::{Hash, HASH_OUTPUT_LEN};
use errors::crypto::CryptoError;
/// A Merkle tree is a binary tree, with values of type `T` at the leafs,
/// and where every internal node holds the hash of the concatenation of the hashes of its children nodes.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MerkleTree {
/// The root of the inner binary tree
pub root: Tree,
/// The height of the tree
pub height: usize,
/// The number of leaf nodes in the tree
pub count: usize,
/// The number of nodes in the tree
pub nodes_count: usize
}
impl MerkleTree {
/// Constructs a Merkle Tree from a vector of data blocks.
/// Returns an empty tree if `values` is empty.
pub fn from_vec(values: Vec<TreeLeafData>) -> Result<Self, CryptoError> {
if values.is_empty() {
return Ok(MerkleTree {
root: Tree::empty(Hash::hash_empty()?),
height: 0,
count: 0,
nodes_count: 0
});
}
let count = values.len();
let mut nodes_count = 0;
let mut height = 0;
let mut cur = Vec::with_capacity(count);
for v in values {
let leaf = Tree::new_leaf(v)?;
cur.push(leaf);
}
while cur.len() > 1 {
let mut next = Vec::new();
while !cur.is_empty() {
if cur.len() == 1 {
next.push(cur.remove(0));
}
else {
let left = cur.remove(0);
let right = cur.remove(0);
let combined_hash = Hash::hash_nodes(
left.hash(),
right.hash()
)?;
let node = Tree::Node {
hash: combined_hash.to_vec(),
left: Box::new(left),
right: Box::new(right)
};
next.push(node);
nodes_count += 1;
}
}
height += 1;
cur = next;
}
debug_assert!(cur.len() == 1);
let root = cur.remove(0);
Ok(MerkleTree {
root: root,
height: height,
count: count,
nodes_count: nodes_count
})
}
/// Returns the root hash of Merkle tree
pub fn root_hash(&self) -> &Vec<u8> {
self.root.hash()
}
/// Returns the hex root hash of Merkle tree
pub fn root_hash_hex(&self) -> String {
let rh = self.root.hash();
let mut ret: String = String::with_capacity(HASH_OUTPUT_LEN * 2);
for i in rh {
ret.push_str(&format!("{:02x}", i));
}
return ret;
}
/// Returns the height of Merkle tree
pub fn height(&self) -> usize {
self.height
}
/// Returns the number of leaves in the Merkle tree
pub fn count(&self) -> usize |
/// Returns whether the Merkle tree is empty or not
pub fn is_empty(&self) -> bool {
self.count() == 0
}
/// Generate an inclusion proof for the given value.
/// Returns `None` if the given value is not found in the tree.
pub fn gen_proof(&self, value: TreeLeafData) -> Result<Option<Proof>, CryptoError> {
let root_hash = self.root_hash().clone();
let leaf_hash = Hash::hash_leaf(&value)?;
Ok(Lemma::new(&self.root, leaf_hash.to_vec().as_slice()).map(|lemma|
Proof::new(root_hash, lemma, value)
))
}
/// Creates an `Iterator` over the values contained in this Merkle tree.
pub fn iter(&self) -> LeavesIterator {
self.root.iter()
}
}
impl IntoIterator for MerkleTree {
type Item = TreeLeafData;
type IntoIter = LeavesIntoIterator;
/// Creates a consuming iterator, that is, one that moves each value out of the Merkle tree.
/// The tree cannot be used after calling this.
fn into_iter(self) -> Self::IntoIter {
self.root.into_iter()
}
}
impl <'a> IntoIterator for &'a MerkleTree {
type Item = &'a TreeLeafData;
type IntoIter = LeavesIterator<'a>;
/// Creates a borrowing `Iterator` over the values contained in this Merkle tree.
fn into_iter(self) -> Self::IntoIter {
self.root.iter()
}
}
| {
self.count
} | identifier_body |
panic_in_result_fn_debug_assertions.rs | #![warn(clippy::panic_in_result_fn)]
#![allow(clippy::unnecessary_wraps)]
// debug_assert should never trigger the `panic_in_result_fn` lint
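// (debug assertions compile to no-ops in release builds, so they cannot panic in production)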
struct A;
impl A {
fn | (x: i32) -> Result<bool, String> {
debug_assert!(x == 5, "wrong argument");
Ok(true)
}
fn result_with_debug_assert_eq(x: i32) -> Result<bool, String> {
debug_assert_eq!(x, 5);
Ok(true)
}
fn result_with_debug_assert_ne(x: i32) -> Result<bool, String> {
debug_assert_ne!(x, 1);
Ok(true)
}
fn other_with_debug_assert_with_message(x: i32) {
debug_assert!(x == 5, "wrong argument");
}
fn other_with_debug_assert_eq(x: i32) {
debug_assert_eq!(x, 5);
}
fn other_with_debug_assert_ne(x: i32) {
debug_assert_ne!(x, 1);
}
fn result_without_banned_functions() -> Result<bool, String> {
let debug_assert = "debug_assert!";
println!("No {}", debug_assert);
Ok(true)
}
}
fn main() {}
| result_with_debug_assert_with_message | identifier_name |
panic_in_result_fn_debug_assertions.rs | #![warn(clippy::panic_in_result_fn)]
#![allow(clippy::unnecessary_wraps)]
// debug_assert should never trigger the `panic_in_result_fn` lint
struct A;
impl A {
fn result_with_debug_assert_with_message(x: i32) -> Result<bool, String> {
debug_assert!(x == 5, "wrong argument");
Ok(true)
}
fn result_with_debug_assert_eq(x: i32) -> Result<bool, String> {
debug_assert_eq!(x, 5);
Ok(true)
}
fn result_with_debug_assert_ne(x: i32) -> Result<bool, String> {
debug_assert_ne!(x, 1);
Ok(true)
}
fn other_with_debug_assert_with_message(x: i32) {
debug_assert!(x == 5, "wrong argument");
}
fn other_with_debug_assert_eq(x: i32) {
debug_assert_eq!(x, 5);
}
fn other_with_debug_assert_ne(x: i32) {
debug_assert_ne!(x, 1);
}
fn result_without_banned_functions() -> Result<bool, String> {
let debug_assert = "debug_assert!";
println!("No {}", debug_assert);
Ok(true)
}
}
fn main() | {} | identifier_body |
|
panic_in_result_fn_debug_assertions.rs | #![warn(clippy::panic_in_result_fn)]
#![allow(clippy::unnecessary_wraps)] | // debug_assert should never trigger the `panic_in_result_fn` lint
struct A;
impl A {
fn result_with_debug_assert_with_message(x: i32) -> Result<bool, String> {
debug_assert!(x == 5, "wrong argument");
Ok(true)
}
fn result_with_debug_assert_eq(x: i32) -> Result<bool, String> {
debug_assert_eq!(x, 5);
Ok(true)
}
fn result_with_debug_assert_ne(x: i32) -> Result<bool, String> {
debug_assert_ne!(x, 1);
Ok(true)
}
fn other_with_debug_assert_with_message(x: i32) {
debug_assert!(x == 5, "wrong argument");
}
fn other_with_debug_assert_eq(x: i32) {
debug_assert_eq!(x, 5);
}
fn other_with_debug_assert_ne(x: i32) {
debug_assert_ne!(x, 1);
}
fn result_without_banned_functions() -> Result<bool, String> {
let debug_assert = "debug_assert!";
println!("No {}", debug_assert);
Ok(true)
}
}
fn main() {} | random_line_split |
|
htmltabledatacellelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLTableDataCellElementBinding;
use dom::document::AbstractDocument;
use dom::element::HTMLTableDataCellElementTypeId;
use dom::htmltablecellelement::HTMLTableCellElement;
use dom::node::{AbstractNode, Node};
pub struct HTMLTableDataCellElement {
htmltablecellelement: HTMLTableCellElement,
}
impl HTMLTableDataCellElement {
pub fn | (localName: ~str, document: AbstractDocument) -> HTMLTableDataCellElement {
HTMLTableDataCellElement {
htmltablecellelement: HTMLTableCellElement::new_inherited(HTMLTableDataCellElementTypeId, localName, document)
}
}
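// Allocates the element and wires it to its JavaScript reflector through the
// generated binding's Wrap function.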
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode {
let element = HTMLTableDataCellElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLTableDataCellElementBinding::Wrap)
}
}
| new_inherited | identifier_name |
htmltabledatacellelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLTableDataCellElementBinding;
use dom::document::AbstractDocument;
use dom::element::HTMLTableDataCellElementTypeId;
use dom::htmltablecellelement::HTMLTableCellElement;
use dom::node::{AbstractNode, Node};
pub struct HTMLTableDataCellElement {
htmltablecellelement: HTMLTableCellElement,
}
impl HTMLTableDataCellElement {
pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLTableDataCellElement |
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode {
let element = HTMLTableDataCellElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLTableDataCellElementBinding::Wrap)
}
}
| {
HTMLTableDataCellElement {
htmltablecellelement: HTMLTableCellElement::new_inherited(HTMLTableDataCellElementTypeId, localName, document)
}
} | identifier_body |
htmltabledatacellelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLTableDataCellElementBinding;
use dom::document::AbstractDocument;
use dom::element::HTMLTableDataCellElementTypeId;
use dom::htmltablecellelement::HTMLTableCellElement;
use dom::node::{AbstractNode, Node};
pub struct HTMLTableDataCellElement {
htmltablecellelement: HTMLTableCellElement, | pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLTableDataCellElement {
HTMLTableDataCellElement {
htmltablecellelement: HTMLTableCellElement::new_inherited(HTMLTableDataCellElementTypeId, localName, document)
}
}
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode {
let element = HTMLTableDataCellElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLTableDataCellElementBinding::Wrap)
}
} | }
impl HTMLTableDataCellElement { | random_line_split |
_9_1_geometry_shader_houses.rs | #![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
use std::ptr;
use std::mem;
use std::os::raw::c_void;
extern crate glfw;
use self::glfw::Context;
extern crate gl;
use self::gl::types::*;
use cgmath::{Point3};
use common::{process_events, processInput};
use shader::Shader;
use camera::Camera;
// settings
const SCR_WIDTH: u32 = 1280;
const SCR_HEIGHT: u32 = 720;
pub fn | () {
let mut camera = Camera {
Position: Point3::new(0.0, 0.0, 3.0),
..Camera::default()
};
let mut firstMouse = true;
let mut lastX: f32 = SCR_WIDTH as f32 / 2.0;
let mut lastY: f32 = SCR_HEIGHT as f32 / 2.0;
// timing
let mut deltaTime: f32; // time between current frame and last frame
let mut lastFrame: f32 = 0.0;
// glfw: initialize and configure
// ------------------------------
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
glfw.window_hint(glfw::WindowHint::ContextVersion(3, 3));
glfw.window_hint(glfw::WindowHint::OpenGlProfile(glfw::OpenGlProfileHint::Core));
#[cfg(target_os = "macos")]
glfw.window_hint(glfw::WindowHint::OpenGlForwardCompat(true));
// glfw window creation
// --------------------
let (mut window, events) = glfw.create_window(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", glfw::WindowMode::Windowed)
.expect("Failed to create GLFW window");
window.make_current();
window.set_framebuffer_size_polling(true);
window.set_cursor_pos_polling(true);
window.set_scroll_polling(true);
// tell GLFW to capture our mouse
window.set_cursor_mode(glfw::CursorMode::Disabled);
// gl: load all OpenGL function pointers
// ---------------------------------------
gl::load_with(|symbol| window.get_proc_address(symbol) as *const _);
let (shader, VBO, VAO) = unsafe {
// configure global opengl state
// -----------------------------
gl::Enable(gl::DEPTH_TEST);
// build and compile shaders
// -------------------------
let shader = Shader::with_geometry_shader(
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.vs",
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.fs",
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.gs"
);
// set up vertex data (and buffer(s)) and configure vertex attributes
// ------------------------------------------------------------------
let points: [f32; 20] = [
-0.5, 0.5, 1.0, 0.0, 0.0, // top-left
0.5, 0.5, 0.0, 1.0, 0.0, // top-right
0.5, -0.5, 0.0, 0.0, 1.0, // bottom-right
-0.5, -0.5, 1.0, 1.0, 0.0 // bottom-left
];
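// Each point carries a 2D position and an RGB color; the geometry shader
// expands every point primitive into a small house shape.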
// points VAO
let (mut VAO, mut VBO) = (0, 0);
gl::GenVertexArrays(1, &mut VAO);
gl::GenBuffers(1, &mut VBO);
gl::BindVertexArray(VAO);
gl::BindBuffer(gl::ARRAY_BUFFER, VBO);
gl::BufferData(gl::ARRAY_BUFFER,
(points.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
&points[0] as *const f32 as *const c_void,
gl::STATIC_DRAW);
let stride = 5 * mem::size_of::<GLfloat>() as GLsizei;
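// Interleaved layout: attribute 0 is the vec2 position at offset 0, attribute 1
// the vec3 color starting two floats in, both using the 5-float stride.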
gl::EnableVertexAttribArray(0);
gl::VertexAttribPointer(0, 2, gl::FLOAT, gl::FALSE, stride, ptr::null());
gl::EnableVertexAttribArray(1);
gl::VertexAttribPointer(1, 3, gl::FLOAT, gl::FALSE, stride, (2 * mem::size_of::<GLfloat>()) as *const f32 as *const c_void);
gl::BindVertexArray(0);
(shader, VBO, VAO)
};
// render loop
// -----------
while !window.should_close() {
// per-frame time logic
// --------------------
let currentFrame = glfw.get_time() as f32;
deltaTime = currentFrame - lastFrame;
lastFrame = currentFrame;
// events
// -----
process_events(&events, &mut firstMouse, &mut lastX, &mut lastY, &mut camera);
// input
// -----
processInput(&mut window, deltaTime, &mut camera);
// render
// ------
unsafe {
gl::ClearColor(0.1, 0.1, 0.1, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
shader.useProgram();
gl::BindVertexArray(VAO);
gl::DrawArrays(gl::POINTS, 0, 4);
}
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
// -------------------------------------------------------------------------------
window.swap_buffers();
glfw.poll_events();
}
// optional: de-allocate all resources once they've outlived their purpose:
// ------------------------------------------------------------------------
unsafe {
gl::DeleteVertexArrays(1, &VAO);
gl::DeleteBuffers(1, &VBO);
}
}
| main_4_9_1 | identifier_name |
_9_1_geometry_shader_houses.rs | #![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
use std::ptr;
use std::mem;
use std::os::raw::c_void;
extern crate glfw;
use self::glfw::Context;
extern crate gl;
use self::gl::types::*;
use cgmath::{Point3};
use common::{process_events, processInput};
use shader::Shader;
use camera::Camera;
// settings
const SCR_WIDTH: u32 = 1280;
const SCR_HEIGHT: u32 = 720;
pub fn main_4_9_1() | glfw.window_hint(glfw::WindowHint::OpenGlForwardCompat(true));
// glfw window creation
// --------------------
let (mut window, events) = glfw.create_window(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", glfw::WindowMode::Windowed)
.expect("Failed to create GLFW window");
window.make_current();
window.set_framebuffer_size_polling(true);
window.set_cursor_pos_polling(true);
window.set_scroll_polling(true);
// tell GLFW to capture our mouse
window.set_cursor_mode(glfw::CursorMode::Disabled);
// gl: load all OpenGL function pointers
// ---------------------------------------
gl::load_with(|symbol| window.get_proc_address(symbol) as *const _);
let (shader, VBO, VAO) = unsafe {
// configure global opengl state
// -----------------------------
gl::Enable(gl::DEPTH_TEST);
// build and compile shaders
// -------------------------
let shader = Shader::with_geometry_shader(
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.vs",
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.fs",
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.gs"
);
// set up vertex data (and buffer(s)) and configure vertex attributes
// ------------------------------------------------------------------
let points: [f32; 20] = [
-0.5, 0.5, 1.0, 0.0, 0.0, // top-left
0.5, 0.5, 0.0, 1.0, 0.0, // top-right
0.5, -0.5, 0.0, 0.0, 1.0, // bottom-right
-0.5, -0.5, 1.0, 1.0, 0.0 // bottom-left
];
// points VAO
let (mut VAO, mut VBO) = (0, 0);
gl::GenVertexArrays(1, &mut VAO);
gl::GenBuffers(1, &mut VBO);
gl::BindVertexArray(VAO);
gl::BindBuffer(gl::ARRAY_BUFFER, VBO);
gl::BufferData(gl::ARRAY_BUFFER,
(points.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
&points[0] as *const f32 as *const c_void,
gl::STATIC_DRAW);
let stride = 5 * mem::size_of::<GLfloat>() as GLsizei;
gl::EnableVertexAttribArray(0);
gl::VertexAttribPointer(0, 2, gl::FLOAT, gl::FALSE, stride, ptr::null());
gl::EnableVertexAttribArray(1);
gl::VertexAttribPointer(1, 3, gl::FLOAT, gl::FALSE, stride, (2 * mem::size_of::<GLfloat>()) as *const f32 as *const c_void);
gl::BindVertexArray(0);
(shader, VBO, VAO)
};
// render loop
// -----------
while !window.should_close() {
// per-frame time logic
// --------------------
let currentFrame = glfw.get_time() as f32;
deltaTime = currentFrame - lastFrame;
lastFrame = currentFrame;
// events
// -----
process_events(&events, &mut firstMouse, &mut lastX, &mut lastY, &mut camera);
// input
// -----
processInput(&mut window, deltaTime, &mut camera);
// render
// ------
unsafe {
gl::ClearColor(0.1, 0.1, 0.1, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
shader.useProgram();
gl::BindVertexArray(VAO);
gl::DrawArrays(gl::POINTS, 0, 4);
}
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
// -------------------------------------------------------------------------------
window.swap_buffers();
glfw.poll_events();
}
// optional: de-allocate all resources once they've outlived their purpose:
// ------------------------------------------------------------------------
unsafe {
gl::DeleteVertexArrays(1, &VAO);
gl::DeleteBuffers(1, &VBO);
}
}
| {
let mut camera = Camera {
Position: Point3::new(0.0, 0.0, 3.0),
..Camera::default()
};
let mut firstMouse = true;
let mut lastX: f32 = SCR_WIDTH as f32 / 2.0;
let mut lastY: f32 = SCR_HEIGHT as f32 / 2.0;
// timing
let mut deltaTime: f32; // time between current frame and last frame
let mut lastFrame: f32 = 0.0;
// glfw: initialize and configure
// ------------------------------
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
glfw.window_hint(glfw::WindowHint::ContextVersion(3, 3));
glfw.window_hint(glfw::WindowHint::OpenGlProfile(glfw::OpenGlProfileHint::Core));
#[cfg(target_os = "macos")] | identifier_body |
_9_1_geometry_shader_houses.rs | #![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
use std::ptr;
use std::mem;
use std::os::raw::c_void;
extern crate glfw;
use self::glfw::Context;
extern crate gl;
use self::gl::types::*;
use cgmath::{Point3};
use common::{process_events, processInput};
use shader::Shader;
use camera::Camera;
// settings
const SCR_WIDTH: u32 = 1280;
const SCR_HEIGHT: u32 = 720;
pub fn main_4_9_1() {
let mut camera = Camera {
Position: Point3::new(0.0, 0.0, 3.0),
..Camera::default()
};
let mut firstMouse = true;
let mut lastX: f32 = SCR_WIDTH as f32 / 2.0;
let mut lastY: f32 = SCR_HEIGHT as f32 / 2.0;
// timing
let mut deltaTime: f32; // time between current frame and last frame
let mut lastFrame: f32 = 0.0;
// glfw: initialize and configure
// ------------------------------
let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
glfw.window_hint(glfw::WindowHint::ContextVersion(3, 3));
glfw.window_hint(glfw::WindowHint::OpenGlProfile(glfw::OpenGlProfileHint::Core));
#[cfg(target_os = "macos")]
glfw.window_hint(glfw::WindowHint::OpenGlForwardCompat(true));
// glfw window creation
// --------------------
let (mut window, events) = glfw.create_window(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", glfw::WindowMode::Windowed)
.expect("Failed to create GLFW window");
window.make_current();
window.set_framebuffer_size_polling(true);
window.set_cursor_pos_polling(true);
window.set_scroll_polling(true);
// tell GLFW to capture our mouse
window.set_cursor_mode(glfw::CursorMode::Disabled);
// gl: load all OpenGL function pointers
// ---------------------------------------
gl::load_with(|symbol| window.get_proc_address(symbol) as *const _);
let (shader, VBO, VAO) = unsafe {
// configure global opengl state
// -----------------------------
gl::Enable(gl::DEPTH_TEST);
// build and compile shaders
// -------------------------
let shader = Shader::with_geometry_shader(
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.vs",
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.fs",
"src/_4_advanced_opengl/shaders/9.1.geometry_shader.gs"
);
// set up vertex data (and buffer(s)) and configure vertex attributes
// ------------------------------------------------------------------
let points: [f32; 20] = [
-0.5, 0.5, 1.0, 0.0, 0.0, // top-left
0.5, 0.5, 0.0, 1.0, 0.0, // top-right
0.5, -0.5, 0.0, 0.0, 1.0, // bottom-right
-0.5, -0.5, 1.0, 1.0, 0.0 // bottom-left
];
// points VAO
let (mut VAO, mut VBO) = (0, 0);
gl::GenVertexArrays(1, &mut VAO);
gl::GenBuffers(1, &mut VBO);
gl::BindVertexArray(VAO);
gl::BindBuffer(gl::ARRAY_BUFFER, VBO);
gl::BufferData(gl::ARRAY_BUFFER,
(points.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
&points[0] as *const f32 as *const c_void,
gl::STATIC_DRAW);
let stride = 5 * mem::size_of::<GLfloat>() as GLsizei;
gl::EnableVertexAttribArray(0);
gl::VertexAttribPointer(0, 2, gl::FLOAT, gl::FALSE, stride, ptr::null());
gl::EnableVertexAttribArray(1);
gl::VertexAttribPointer(1, 3, gl::FLOAT, gl::FALSE, stride, (2 * mem::size_of::<GLfloat>()) as *const f32 as *const c_void);
gl::BindVertexArray(0);
(shader, VBO, VAO)
};
// render loop
// -----------
while !window.should_close() {
// per-frame time logic
// --------------------
let currentFrame = glfw.get_time() as f32;
deltaTime = currentFrame - lastFrame;
lastFrame = currentFrame;
// events
// -----
process_events(&events, &mut firstMouse, &mut lastX, &mut lastY, &mut camera);
| processInput(&mut window, deltaTime, &mut camera);
// render
// ------
unsafe {
gl::ClearColor(0.1, 0.1, 0.1, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
shader.useProgram();
gl::BindVertexArray(VAO);
gl::DrawArrays(gl::POINTS, 0, 4);
}
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
// -------------------------------------------------------------------------------
window.swap_buffers();
glfw.poll_events();
}
// optional: de-allocate all resources once they've outlived their purpose:
// ------------------------------------------------------------------------
unsafe {
gl::DeleteVertexArrays(1, &VAO);
gl::DeleteBuffers(1, &VBO);
}
} | // input
// ----- | random_line_split |
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn new(local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
}
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
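// Usage sketch with a placeholder service name (assumes this module is the
// opentelemetry-zipkin crate root):
// let tracer = new_pipeline()
//     .with_service_name("my-service")
//     .install_simple()?;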
/// Builder for `ExporterConfig` struct.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
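// Exactly one default client is selected by the cargo features below:
// reqwest-blocking takes precedence, then reqwest, then surf; with no
// client feature enabled the field is left as None.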
ZipkinPipelineBuilder {
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initialize a Zipkin span exporter.
///
/// Returns an error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
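// When the caller supplied a service name it is reported via the Zipkin
// endpoint, so any service.name attribute is stripped from the trace
// resource to avoid a conflicting value.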
if let Some(service_name) = service_name {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use an empty resource to prevent the TracerProvider from assigning a service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
|
/// Assign client implementation
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the service address reported for the local endpoint.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrap type for errors from opentelemetry zipkin
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
} | /// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
} | random_line_split |
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn | (local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
}
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
/// Builder for `ExporterConfig` struct.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
ZipkinPipelineBuilder {
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initialize a Zipkin span exporter.
///
/// Returns an error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
if let Some(service_name) = service_name {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use an empty resource to prevent the TracerProvider from assigning a service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
}
/// Assign client implementation
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the service address reported for the local endpoint.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrap type for errors from opentelemetry zipkin
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
}
| new | identifier_name |
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn new(local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
}
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
/// Builder for `ExporterConfig` struct.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
ZipkinPipelineBuilder {
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initialize a Zipkin span exporter.
///
/// Returns error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
if let Some(service_name) = service_name | else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use an empty resource to prevent the TracerProvider from assigning a default service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
}
/// Assign client implementation
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the service address, reported as the span's local endpoint.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrapper type for errors from the opentelemetry zipkin exporter.
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
}
| {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} | conditional_block |
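// --- Editor's note (illustrative example, not part of the dataset rows) ---
// The reconstructed conditional block above strips any caller-supplied
// `service.name` from a custom trace config so the Zipkin endpoint's service
// name wins. The same filtering step in isolation (paths taken from this
// row's own imports):
use opentelemetry::sdk::Resource;
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions as semcov;

fn strip_service_name(r: &Resource) -> Resource {
    let without_service_name = r
        .iter()
        .filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
        .map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
        .collect::<Vec<KeyValue>>();
    Resource::new(without_service_name)
}
// --- end editor's note ---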
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn new(local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self |
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
/// Builder for `ExporterConfig` struct.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
ZipkinPipelineBuilder {
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initialize a Zipkin span exporter.
///
/// Returns error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
if let Some(service_name) = service_name {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use an empty resource to prevent the TracerProvider from assigning a default service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
}
/// Assign client implementation
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the service address, reported as the span's local endpoint.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrapper type for errors from the opentelemetry zipkin exporter.
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
}
| {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
} | identifier_body |
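// --- Editor's note (illustrative example, not part of the dataset rows) ---
// A sketch of supplying your own HTTP client instead of the feature-gated
// defaults built in `ZipkinPipelineBuilder::default()` above. That
// `reqwest::Client` implements the `opentelemetry_http::HttpClient` trait
// (behind the matching cargo feature) is an assumption from context.
fn build_exporter() -> Result<opentelemetry_zipkin::Exporter, opentelemetry::trace::TraceError> {
    opentelemetry_zipkin::new_pipeline()
        .with_http_client(reqwest::Client::new()) // overrides the default client
        .with_collector_endpoint("http://localhost:9411/api/v2/spans")
        .init_exporter()
}
// --- end editor's note ---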
instance_metadata.rs | //! The Credentials Provider for an AWS Resource's IAM Role.
use async_trait::async_trait;
use hyper::Uri;
use std::time::Duration;
use crate::request::HttpClient;
use crate::{
parse_credentials_from_aws_service, AwsCredentials, CredentialsError, ProvideAwsCredentials,
};
const AWS_CREDENTIALS_PROVIDER_IP: &str = "169.254.169.254";
const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-credentials";
/// Provides AWS credentials from a resource's IAM role.
///
/// The provider has a default timeout of 30 seconds. While it should work well for most setups,
/// you can change the timeout using the `set_timeout` method.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default timeout like this:
/// provider.set_timeout(Duration::from_secs(60));
/// ```
///
/// The source location can be changed from the default of 169.254.169.254:
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default endpoint like this:
/// provider.set_ip_addr_with_port("127.0.0.1", "8080");
/// ```
#[derive(Clone, Debug)]
pub struct InstanceMetadataProvider {
client: HttpClient,
timeout: Duration,
metadata_ip_addr: String,
}
impl InstanceMetadataProvider {
/// Create a new provider with the given handle.
pub fn new() -> Self {
InstanceMetadataProvider {
client: HttpClient::new(),
timeout: Duration::from_secs(30),
metadata_ip_addr: AWS_CREDENTIALS_PROVIDER_IP.to_string(),
}
}
/// Set the timeout on the provider to the specified duration.
pub fn set_timeout(&mut self, timeout: Duration) {
self.timeout = timeout;
}
/// Allow overriding host and port of instance metadata service.
pub fn set_ip_addr_with_port(&mut self, ip: &str, port: &str) {
self.metadata_ip_addr = format!("{}:{}", ip, port);
}
}
impl Default for InstanceMetadataProvider {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl ProvideAwsCredentials for InstanceMetadataProvider {
async fn | (&self) -> Result<AwsCredentials, CredentialsError> {
let role_name = get_role_name(&self.client, self.timeout, &self.metadata_ip_addr)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
let cred_str = get_credentials_from_role(
&self.client,
self.timeout,
&role_name,
&self.metadata_ip_addr,
)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
parse_credentials_from_aws_service(&cred_str)
}
}
/// Gets the name of the IAM role to fetch credentials for, using the instance metadata service (169.254.169.254).
async fn get_role_name(
client: &HttpClient,
timeout: Duration,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let role_name_address = format!("http://{}/{}/", ip_addr, AWS_CREDENTIALS_PROVIDER_PATH);
let uri = match role_name_address.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
/// Gets the credentials for an EC2 instance's IAM role.
async fn get_credentials_from_role(
client: &HttpClient,
timeout: Duration,
role_name: &str,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let credentials_provider_url = format!(
"http://{}/{}/{}",
ip_addr, AWS_CREDENTIALS_PROVIDER_PATH, role_name
);
let uri = match credentials_provider_url.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
| credentials | identifier_name |
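// --- Editor's note (illustrative example, not part of the dataset rows) ---
// A usage sketch for the provider defined above; the external crate names
// (`rusoto_credential`, `tokio`) and the `aws_access_key_id` accessor are
// assumptions from the wider rusoto API, not from this row.
use rusoto_credential::{InstanceMetadataProvider, ProvideAwsCredentials};
use std::time::Duration;

#[tokio::main]
async fn main() {
    let mut provider = InstanceMetadataProvider::new();
    provider.set_timeout(Duration::from_secs(5)); // fail fast when not on EC2
    match provider.credentials().await {
        Ok(creds) => println!("access key id: {}", creds.aws_access_key_id()),
        Err(e) => eprintln!("no instance credentials: {}", e),
    }
}
// --- end editor's note ---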
instance_metadata.rs | //! The Credentials Provider for an AWS Resource's IAM Role.
use async_trait::async_trait;
use hyper::Uri;
use std::time::Duration;
use crate::request::HttpClient;
use crate::{
parse_credentials_from_aws_service, AwsCredentials, CredentialsError, ProvideAwsCredentials,
};
const AWS_CREDENTIALS_PROVIDER_IP: &str = "169.254.169.254";
const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-credentials";
/// Provides AWS credentials from a resource's IAM role.
///
/// The provider has a default timeout of 30 seconds. While it should work well for most setups,
/// you can change the timeout using the `set_timeout` method.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default timeout like this:
/// provider.set_timeout(Duration::from_secs(60));
/// ```
///
/// The source location can be changed from the default of 169.254.169.254:
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default endpoint like this:
/// provider.set_ip_addr_with_port("127.0.0.1", "8080");
/// ```
#[derive(Clone, Debug)]
pub struct InstanceMetadataProvider {
client: HttpClient,
timeout: Duration,
metadata_ip_addr: String,
}
impl InstanceMetadataProvider {
/// Create a new provider with the given handle.
pub fn new() -> Self {
InstanceMetadataProvider {
client: HttpClient::new(),
timeout: Duration::from_secs(30),
metadata_ip_addr: AWS_CREDENTIALS_PROVIDER_IP.to_string(),
}
}
/// Set the timeout on the provider to the specified duration.
pub fn set_timeout(&mut self, timeout: Duration) {
self.timeout = timeout;
}
/// Allow overriding host and port of instance metadata service.
pub fn set_ip_addr_with_port(&mut self, ip: &str, port: &str) {
self.metadata_ip_addr = format!("{}:{}", ip, port);
}
}
impl Default for InstanceMetadataProvider {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl ProvideAwsCredentials for InstanceMetadataProvider {
async fn credentials(&self) -> Result<AwsCredentials, CredentialsError> |
}
/// Gets the name of the IAM role to fetch credentials for, using the instance metadata service (169.254.169.254).
async fn get_role_name(
client: &HttpClient,
timeout: Duration,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let role_name_address = format!("http://{}/{}/", ip_addr, AWS_CREDENTIALS_PROVIDER_PATH);
let uri = match role_name_address.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
/// Gets the credentials for an EC2 instance's IAM role.
async fn get_credentials_from_role(
client: &HttpClient,
timeout: Duration,
role_name: &str,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let credentials_provider_url = format!(
"http://{}/{}/{}",
ip_addr, AWS_CREDENTIALS_PROVIDER_PATH, role_name
);
let uri = match credentials_provider_url.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
| {
let role_name = get_role_name(&self.client, self.timeout, &self.metadata_ip_addr)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
let cred_str = get_credentials_from_role(
&self.client,
self.timeout,
&role_name,
&self.metadata_ip_addr,
)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
parse_credentials_from_aws_service(&cred_str)
} | identifier_body |
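// --- Editor's note (illustrative example, not part of the dataset rows) ---
// The two helpers above differ only in the path they build. For role name
// "my-role" and the default address, the requests target:
//   http://169.254.169.254/latest/meta-data/iam/security-credentials/          (list role name)
//   http://169.254.169.254/latest/meta-data/iam/security-credentials/my-role   (fetch credentials)
// The same construction in isolation:
fn credentials_url(ip_addr: &str, role_name: &str) -> String {
    const PATH: &str = "latest/meta-data/iam/security-credentials";
    format!("http://{}/{}/{}", ip_addr, PATH, role_name)
}

fn main() {
    assert_eq!(
        credentials_url("169.254.169.254", "my-role"),
        "http://169.254.169.254/latest/meta-data/iam/security-credentials/my-role"
    );
}
// --- end editor's note ---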
instance_metadata.rs | //! The Credentials Provider for an AWS Resource's IAM Role.
use async_trait::async_trait;
use hyper::Uri;
use std::time::Duration;
use crate::request::HttpClient;
use crate::{
parse_credentials_from_aws_service, AwsCredentials, CredentialsError, ProvideAwsCredentials,
};
const AWS_CREDENTIALS_PROVIDER_IP: &str = "169.254.169.254";
const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-credentials";
/// Provides AWS credentials from a resource's IAM role.
///
/// The provider has a default timeout of 30 seconds. While it should work well for most setups,
/// you can change the timeout using the `set_timeout` method.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default timeout like this:
/// provider.set_timeout(Duration::from_secs(60));
/// ```
///
/// The source location can be changed from the default of 169.254.169.254:
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default endpoint like this:
/// provider.set_ip_addr_with_port("127.0.0.1", "8080");
/// ```
#[derive(Clone, Debug)]
pub struct InstanceMetadataProvider {
client: HttpClient,
timeout: Duration,
metadata_ip_addr: String,
}
impl InstanceMetadataProvider {
/// Create a new provider with the given handle.
pub fn new() -> Self {
InstanceMetadataProvider {
client: HttpClient::new(),
timeout: Duration::from_secs(30),
metadata_ip_addr: AWS_CREDENTIALS_PROVIDER_IP.to_string(),
}
}
/// Set the timeout on the provider to the specified duration.
pub fn set_timeout(&mut self, timeout: Duration) { | self.metadata_ip_addr = format!("{}:{}", ip, port);
}
}
impl Default for InstanceMetadataProvider {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl ProvideAwsCredentials for InstanceMetadataProvider {
async fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let role_name = get_role_name(&self.client, self.timeout, &self.metadata_ip_addr)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
let cred_str = get_credentials_from_role(
&self.client,
self.timeout,
&role_name,
&self.metadata_ip_addr,
)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
parse_credentials_from_aws_service(&cred_str)
}
}
/// Gets the name of the IAM role to fetch credentials for, using the instance metadata service (169.254.169.254).
async fn get_role_name(
client: &HttpClient,
timeout: Duration,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let role_name_address = format!("http://{}/{}/", ip_addr, AWS_CREDENTIALS_PROVIDER_PATH);
let uri = match role_name_address.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
/// Gets the credentials for an EC2 instance's IAM role.
async fn get_credentials_from_role(
client: &HttpClient,
timeout: Duration,
role_name: &str,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let credentials_provider_url = format!(
"http://{}/{}/{}",
ip_addr, AWS_CREDENTIALS_PROVIDER_PATH, role_name
);
let uri = match credentials_provider_url.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
} | self.timeout = timeout;
}
/// Allow overriding host and port of instance metadata service.
pub fn set_ip_addr_with_port(&mut self, ip: &str, port: &str) { | random_line_split |
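// --- Editor's note (illustrative example, not part of the dataset rows) ---
// A sketch of pointing the provider at a local stand-in for the metadata
// service, as the `set_ip_addr_with_port` override above allows; the address
// 127.0.0.1:8080 is a placeholder for whatever mock server a test runs.
use rusoto_credential::InstanceMetadataProvider;

fn mock_provider() -> InstanceMetadataProvider {
    let mut provider = InstanceMetadataProvider::new();
    provider.set_ip_addr_with_port("127.0.0.1", "8080"); // requests now hit the mock
    provider
}
// --- end editor's note ---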
test_cargo_profiles.rs | use std::env;
use std::path::MAIN_SEPARATOR as SEP;
use support::{project, execs};
use support::{COMPILING, RUNNING};
use hamcrest::assert_that;
fn setup() |
test!(profile_overrides {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.dev]
opt-level = 1
debug = false
rpath = true
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0).with_stdout(&format!("\
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-C debug-assertions=on \
-C metadata=[..] \
-C extra-filename=-[..] \
-C rpath \
--out-dir {dir}{sep}target{sep}debug \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}debug \
-L dependency={dir}{sep}target{sep}debug{sep}deps`
",
running = RUNNING, compiling = COMPILING, sep = SEP,
dir = p.root().display(),
url = p.url(),
)));
});
test!(top_level_overrides_deps {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 1
debug = true
[dependencies.foo]
path = "foo"
"#)
.file("src/lib.rs", "")
.file("foo/Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 0
debug = false
[lib]
name = "foo"
crate_type = ["dylib", "rlib"]
"#)
.file("foo/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v").arg("--release"),
execs().with_status(0).with_stdout(&format!("\
{compiling} foo v0.0.0 ({url})
{running} `rustc foo{sep}src{sep}lib.rs --crate-name foo \
--crate-type dylib --crate-type rlib -C prefer-dynamic \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release{sep}deps \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release{sep}deps \
-L dependency={dir}{sep}target{sep}release{sep}deps`
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release \
-L dependency={dir}{sep}target{sep}release{sep}deps \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}\
{prefix}foo-[..]{suffix} \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}libfoo-[..].rlib`
",
running = RUNNING,
compiling = COMPILING,
dir = p.root().display(),
url = p.url(),
sep = SEP,
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX)));
});
| {
} | identifier_body |
test_cargo_profiles.rs | use std::env;
use std::path::MAIN_SEPARATOR as SEP;
use support::{project, execs};
use support::{COMPILING, RUNNING};
use hamcrest::assert_that;
fn | () {
}
test!(profile_overrides {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.dev]
opt-level = 1
debug = false
rpath = true
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0).with_stdout(&format!("\
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-C debug-assertions=on \
-C metadata=[..] \
-C extra-filename=-[..] \
-C rpath \
--out-dir {dir}{sep}target{sep}debug \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}debug \
-L dependency={dir}{sep}target{sep}debug{sep}deps`
",
running = RUNNING, compiling = COMPILING, sep = SEP,
dir = p.root().display(),
url = p.url(),
)));
});
test!(top_level_overrides_deps {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 1
debug = true
[dependencies.foo]
path = "foo"
"#)
.file("src/lib.rs", "")
.file("foo/Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 0
debug = false
[lib]
name = "foo"
crate_type = ["dylib", "rlib"]
"#)
.file("foo/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v").arg("--release"),
execs().with_status(0).with_stdout(&format!("\
{compiling} foo v0.0.0 ({url})
{running} `rustc foo{sep}src{sep}lib.rs --crate-name foo \
--crate-type dylib --crate-type rlib -C prefer-dynamic \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release{sep}deps \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release{sep}deps \
-L dependency={dir}{sep}target{sep}release{sep}deps`
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release \
-L dependency={dir}{sep}target{sep}release{sep}deps \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}\
{prefix}foo-[..]{suffix} \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}libfoo-[..].rlib`
",
running = RUNNING,
compiling = COMPILING,
dir = p.root().display(),
url = p.url(),
sep = SEP,
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX)));
});
| setup | identifier_name |
test_cargo_profiles.rs | use std::env;
use std::path::MAIN_SEPARATOR as SEP;
use support::{project, execs};
use support::{COMPILING, RUNNING};
use hamcrest::assert_that;
fn setup() {
}
test!(profile_overrides {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.dev]
opt-level = 1
debug = false
rpath = true
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0).with_stdout(&format!("\
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-C debug-assertions=on \
-C metadata=[..] \
-C extra-filename=-[..] \
-C rpath \
--out-dir {dir}{sep}target{sep}debug \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}debug \
-L dependency={dir}{sep}target{sep}debug{sep}deps`
",
running = RUNNING, compiling = COMPILING, sep = SEP,
dir = p.root().display(),
url = p.url(),
)));
});
test!(top_level_overrides_deps {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 1
debug = true
[dependencies.foo]
path = "foo"
"#)
.file("src/lib.rs", "")
.file("foo/Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 0
debug = false
[lib]
name = "foo"
crate_type = ["dylib", "rlib"]
"#)
.file("foo/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v").arg("--release"),
execs().with_status(0).with_stdout(&format!("\
{compiling} foo v0.0.0 ({url})
{running} `rustc foo{sep}src{sep}lib.rs --crate-name foo \
--crate-type dylib --crate-type rlib -C prefer-dynamic \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release{sep}deps \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release{sep}deps \
-L dependency={dir}{sep}target{sep}release{sep}deps`
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \ | --emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release \
-L dependency={dir}{sep}target{sep}release{sep}deps \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}\
{prefix}foo-[..]{suffix} \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}libfoo-[..].rlib`
",
running = RUNNING,
compiling = COMPILING,
dir = p.root().display(),
url = p.url(),
sep = SEP,
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX)));
}); | -g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release \ | random_line_split |
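// --- Editor's note (illustrative example, not part of the dataset rows) ---
// The expected-output fixtures above splice in platform constants; this
// standalone snippet prints the values they resolve to (for example "lib" /
// ".so" on Linux and "" / ".dll" on Windows).
use std::env;
use std::path::MAIN_SEPARATOR;

fn main() {
    println!("dll prefix: {:?}", env::consts::DLL_PREFIX);
    println!("dll suffix: {:?}", env::consts::DLL_SUFFIX);
    println!("path separator: {:?}", MAIN_SEPARATOR);
}
// --- end editor's note ---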
font.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::kCTFontDefaultOrientation;
use font::{FontHandleMethods, FontMetrics, FontTableMethods, FontTableTag, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::{fmt, ptr};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 |
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
if result.pair_data_range.len() != n_pairs * KERN_PAIR_LEN {
debug!("Bad data in kern header. Disable fast path.");
return None;
}
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
if !result {
// No glyph for this character
return None;
}
assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt((self.ctfont.x_height() as f64) * scale),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
}
| {
pt / 72. * 96.
} | identifier_body |
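// --- Editor's note (illustrative example, not part of the dataset rows) ---
// A standalone sketch of the lookup key used by `CachedKernTable::binary_search`
// above: two 16-bit glyph ids packed into one u32, matching the big-endian
// layout of a format-0 kern subtable so the byte table can be searched directly.
fn kern_key(first_glyph: u32, second_glyph: u32) -> u32 {
    first_glyph << 16 | second_glyph
}

fn main() {
    // glyph pair (3, 7) packs to 0x0003_0007
    assert_eq!(kern_key(3, 7), 0x0003_0007);
    // a kern value in font units scales to px via px_per_font_unit,
    // e.g. a 12pt font with 1000 units/em at 96 dpi:
    let px_per_font_unit = 12.0 / 1000.0 * (96.0 / 72.0);
    let kern_px = -50.0 * px_per_font_unit;
    assert!((kern_px + 0.8_f64).abs() < 1e-9);
}
// --- end editor's note ---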
font.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::kCTFontDefaultOrientation;
use font::{FontHandleMethods, FontMetrics, FontTableMethods, FontTableTag, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::{fmt, ptr};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 {
pt / 72. * 96.
}
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
if result.pair_data_range.len() != n_pairs * KERN_PAIR_LEN {
debug!("Bad data in kern header. Disable fast path.");
return None;
}
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn | (&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
if !result {
// No glyph for this character
return None;
}
assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt((self.ctfont.x_height() as f64) * scale),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
}
| family_name | identifier_name |
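// --- Editor's note (illustrative example, not part of the dataset rows) ---
// A standalone sketch of the arithmetic in `boldness` above: CoreText's
// normalized weight in [-1.0, 1.0] is rescaled onto the CSS 100..900 scale,
// centered so that 0.0 lands on weight 400.
fn css_weight(normalized: f64) -> u32 {
    let n = if normalized <= 0.0 {
        4.0 + normalized * 3.0 // [-1.0, 0.0] -> [1.0, 4.0]
    } else {
        4.0 + normalized * 5.0 // (0.0, 1.0] -> (4.0, 9.0]
    };
    (n.round() as u32).clamp(1, 9) * 100
}

fn main() {
    assert_eq!(css_weight(-1.0), 100);
    assert_eq!(css_weight(0.0), 400);
    assert_eq!(css_weight(1.0), 900);
}
// --- end editor's note ---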
font.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::kCTFontDefaultOrientation;
use font::{FontHandleMethods, FontMetrics, FontTableMethods, FontTableTag, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::{fmt, ptr};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 {
pt / 72. * 96.
}
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
if result.pair_data_range.len() != n_pairs * KERN_PAIR_LEN {
debug!("Bad data in kern header. Disable fast path.");
return None;
}
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
} |
fn family_name(&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
if !result {
// No glyph for this character
return None;
}
assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt((self.ctfont.x_height() as f64) * scale),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
} |
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
} | random_line_split |
profile.rs | use crate::{Error, Result};
use chrono::{DateTime, Utc};
use colored::Colorize;
use serde::Deserialize;
use std::fs::File;
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::time::SystemTime;
/// Represents a file with a provisioning profile info.
#[derive(Debug, Clone)]
pub struct Profile {
pub path: PathBuf,
pub info: Info,
}
impl Profile {
/// Returns an instance of the `Profile` parsed from a file.
pub fn from_file(path: &Path) -> Result<Self> {
let mut buf = Vec::new();
File::open(path)?.read_to_end(&mut buf)?;
let info =
Info::from_xml_data(&buf).ok_or_else(|| Error::Own("Couldn't parse file.".into()))?;
Ok(Self {
path: path.to_owned(),
info,
})
}
}
/// Represents provisioning profile info.
#[derive(Debug, PartialEq, Clone)]
pub struct Info {
pub uuid: String,
pub name: String,
pub app_identifier: String,
pub creation_date: SystemTime,
pub expiration_date: SystemTime,
}
#[derive(Debug, Deserialize)]
struct InfoDef {
#[serde(rename = "UUID")]
pub uuid: String,
#[serde(rename = "Name")]
pub name: String,
#[serde(rename = "Entitlements")]
pub entitlements: Entitlements,
#[serde(rename = "CreationDate")]
pub creation_date: plist::Date,
#[serde(rename = "ExpirationDate")]
pub expiration_date: plist::Date,
}
#[derive(Debug, Deserialize)]
struct Entitlements {
#[serde(rename = "application-identifier")]
pub app_identifier: String,
}
impl Info {
/// Returns an instance of `Info` parsed from `data`.
pub fn from_xml_data(data: &[u8]) -> Option<Self> {
crate::plist_extractor::find(data).and_then(|xml| {
plist::from_reader_xml(io::Cursor::new(xml))
.ok()
.map(|info: InfoDef| Self {
uuid: info.uuid,
name: info.name,
app_identifier: info.entitlements.app_identifier,
creation_date: info.creation_date.into(),
expiration_date: info.expiration_date.into(),
})
})
}
/// Returns an empty profile info.
pub fn empty() -> Self {
Self {
uuid: "".into(),
name: "".into(),
app_identifier: "".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
}
}
/// Returns `true` if one or more fields of the profile contain `string`.
pub fn contains(&self, string: &str) -> bool {
let s = string.to_lowercase();
let items = &[&self.name, &self.app_identifier, &self.uuid];
for item in items {
if item.to_lowercase().contains(&s) {
return true;
}
}
false
}
/// Returns a bundle id of a profile.
pub fn bundle_id(&self) -> Option<&str> {
self.app_identifier
.find(|ch| ch == '.')
.map(|i| &self.app_identifier[(i + 1)..])
}
/// Returns the profile in text form.
pub fn description(&self, oneline: bool) -> String {
if oneline {
return format!(
"{} {} {} {}",
self.uuid.yellow(),
DateTime::<Utc>::from(self.expiration_date)
.format("%Y-%m-%d")
.to_string()
.blue(),
self.app_identifier.green(),
self.name
);
} else {
let dates = format!(
"{} - {}",
DateTime::<Utc>::from(self.creation_date),
DateTime::<Utc>::from(self.expiration_date)
)
.blue();
return format!(
"{}\n{}\n{}\n{}",
self.uuid.yellow(),
self.app_identifier.green(),
self.name,
dates
);
}
}
}
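// --- Editorial sketch (not part of the original source) ---
// Info::bundle_id above drops the leading team-ID component by splitting the
// application identifier at its first '.'. The same idea in isolation; the
// identifier below is a made-up value.
fn bundle_id_sketch(app_identifier: &str) -> Option<&str> {
    // "TEAMID.com.example.app" -> Some("com.example.app"); no '.' -> None.
    app_identifier.find('.').map(|i| &app_identifier[i + 1..])
}
#[test]
fn bundle_id_sketch_strips_team_id() {
    assert_eq!(bundle_id_sketch("12345ABCDE.com.example.app"), Some("com.example.app"));
    assert_eq!(bundle_id_sketch("12345ABCDE"), None);
}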
#[cfg(test)]
mod tests {
use super::*;
use expectest::expect;
use expectest::prelude::*;
#[test]
fn contains() {
let profile = Info {
uuid: "123".into(),
name: "name".into(),
app_identifier: "id".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
};
expect!(profile.contains("12")).to(be_true());
expect!(profile.contains("me")).to(be_true());
expect!(profile.contains("id")).to(be_true());
}
#[test]
fn | () {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.com.exmaple.app".to_owned();
expect!(profile.bundle_id()).to(be_some().value("com.exmaple.app"));
}
#[test]
fn incorrect_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE".to_owned();
expect!(profile.bundle_id()).to(be_none());
}
#[test]
fn wildcard_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.*".to_owned();
expect!(profile.bundle_id()).to(be_some().value("*"));
}
}
| correct_bundle_id | identifier_name |
profile.rs | use crate::{Error, Result};
use chrono::{DateTime, Utc};
use colored::Colorize;
use serde::Deserialize;
use std::fs::File;
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::time::SystemTime;
/// Represents a file with a provisioning profile info.
#[derive(Debug, Clone)]
pub struct Profile {
pub path: PathBuf,
pub info: Info,
}
impl Profile {
/// Returns an instance of the `Profile` parsed from a file.
pub fn from_file(path: &Path) -> Result<Self> {
let mut buf = Vec::new();
File::open(path)?.read_to_end(&mut buf)?;
let info =
Info::from_xml_data(&buf).ok_or_else(|| Error::Own("Couldn't parse file.".into()))?;
Ok(Self {
path: path.to_owned(),
info,
})
}
}
/// Represents provisioning profile info.
#[derive(Debug, PartialEq, Clone)]
pub struct Info {
pub uuid: String,
pub name: String,
pub app_identifier: String,
pub creation_date: SystemTime,
pub expiration_date: SystemTime,
}
#[derive(Debug, Deserialize)]
struct InfoDef {
#[serde(rename = "UUID")]
pub uuid: String,
#[serde(rename = "Name")]
pub name: String,
#[serde(rename = "Entitlements")]
pub entitlements: Entitlements,
#[serde(rename = "CreationDate")]
pub creation_date: plist::Date,
#[serde(rename = "ExpirationDate")]
pub expiration_date: plist::Date,
}
#[derive(Debug, Deserialize)]
struct Entitlements {
#[serde(rename = "application-identifier")]
pub app_identifier: String,
}
impl Info {
/// Returns an instance of `Info` parsed from `data`.
pub fn from_xml_data(data: &[u8]) -> Option<Self> {
crate::plist_extractor::find(data).and_then(|xml| {
plist::from_reader_xml(io::Cursor::new(xml))
.ok()
.map(|info: InfoDef| Self {
uuid: info.uuid,
name: info.name,
app_identifier: info.entitlements.app_identifier,
creation_date: info.creation_date.into(),
expiration_date: info.expiration_date.into(),
})
})
}
/// Returns an empty profile info.
pub fn empty() -> Self {
Self {
uuid: "".into(),
name: "".into(),
app_identifier: "".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
}
}
/// Returns `true` if one or more fields of the profile contain `string`.
pub fn contains(&self, string: &str) -> bool {
let s = string.to_lowercase();
let items = &[&self.name, &self.app_identifier, &self.uuid];
for item in items {
if item.to_lowercase().contains(&s) {
return true;
}
}
false
}
/// Returns a bundle id of a profile.
pub fn bundle_id(&self) -> Option<&str> {
self.app_identifier
.find(|ch| ch == '.')
.map(|i| &self.app_identifier[(i + 1)..])
}
/// Returns the profile in text form.
pub fn description(&self, oneline: bool) -> String {
if oneline {
return format!(
"{} {} {} {}",
self.uuid.yellow(),
DateTime::<Utc>::from(self.expiration_date)
.format("%Y-%m-%d")
.to_string()
.blue(),
self.app_identifier.green(),
self.name
);
} else {
let dates = format!(
"{} - {}",
DateTime::<Utc>::from(self.creation_date),
DateTime::<Utc>::from(self.expiration_date)
)
.blue();
return format!(
"{}\n{}\n{}\n{}",
self.uuid.yellow(),
self.app_identifier.green(),
self.name,
dates
);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use expectest::expect;
use expectest::prelude::*;
#[test]
fn contains() {
let profile = Info {
uuid: "123".into(),
name: "name".into(),
app_identifier: "id".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
};
expect!(profile.contains("12")).to(be_true());
expect!(profile.contains("me")).to(be_true());
expect!(profile.contains("id")).to(be_true());
}
#[test]
fn correct_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.com.exmaple.app".to_owned();
expect!(profile.bundle_id()).to(be_some().value("com.exmaple.app"));
}
#[test]
fn incorrect_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE".to_owned();
expect!(profile.bundle_id()).to(be_none());
}
| fn wildcard_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.*".to_owned();
expect!(profile.bundle_id()).to(be_some().value("*"));
}
} | #[test] | random_line_split |
profile.rs | use crate::{Error, Result};
use chrono::{DateTime, Utc};
use colored::Colorize;
use serde::Deserialize;
use std::fs::File;
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::time::SystemTime;
/// Represents a file with a provisioning profile info.
#[derive(Debug, Clone)]
pub struct Profile {
pub path: PathBuf,
pub info: Info,
}
impl Profile {
/// Returns an instance of the `Profile` parsed from a file.
pub fn from_file(path: &Path) -> Result<Self> {
let mut buf = Vec::new();
File::open(path)?.read_to_end(&mut buf)?;
let info =
Info::from_xml_data(&buf).ok_or_else(|| Error::Own("Couldn't parse file.".into()))?;
Ok(Self {
path: path.to_owned(),
info,
})
}
}
/// Represents provisioning profile info.
#[derive(Debug, PartialEq, Clone)]
pub struct Info {
pub uuid: String,
pub name: String,
pub app_identifier: String,
pub creation_date: SystemTime,
pub expiration_date: SystemTime,
}
#[derive(Debug, Deserialize)]
struct InfoDef {
#[serde(rename = "UUID")]
pub uuid: String,
#[serde(rename = "Name")]
pub name: String,
#[serde(rename = "Entitlements")]
pub entitlements: Entitlements,
#[serde(rename = "CreationDate")]
pub creation_date: plist::Date,
#[serde(rename = "ExpirationDate")]
pub expiration_date: plist::Date,
}
#[derive(Debug, Deserialize)]
struct Entitlements {
#[serde(rename = "application-identifier")]
pub app_identifier: String,
}
impl Info {
/// Returns an instance of `Info` parsed from `data`.
pub fn from_xml_data(data: &[u8]) -> Option<Self> {
crate::plist_extractor::find(data).and_then(|xml| {
plist::from_reader_xml(io::Cursor::new(xml))
.ok()
.map(|info: InfoDef| Self {
uuid: info.uuid,
name: info.name,
app_identifier: info.entitlements.app_identifier,
creation_date: info.creation_date.into(),
expiration_date: info.expiration_date.into(),
})
})
}
/// Returns an empty profile info.
pub fn empty() -> Self {
Self {
uuid: "".into(),
name: "".into(),
app_identifier: "".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
}
}
/// Returns `true` if one or more fields of the profile contain `string`.
pub fn contains(&self, string: &str) -> bool {
let s = string.to_lowercase();
let items = &[&self.name, &self.app_identifier, &self.uuid];
for item in items {
if item.to_lowercase().contains(&s) |
}
false
}
/// Returns a bundle id of a profile.
pub fn bundle_id(&self) -> Option<&str> {
self.app_identifier
.find(|ch| ch == '.')
.map(|i| &self.app_identifier[(i + 1)..])
}
/// Returns the profile in text form.
pub fn description(&self, oneline: bool) -> String {
if oneline {
return format!(
"{} {} {} {}",
self.uuid.yellow(),
DateTime::<Utc>::from(self.expiration_date)
.format("%Y-%m-%d")
.to_string()
.blue(),
self.app_identifier.green(),
self.name
);
} else {
let dates = format!(
"{} - {}",
DateTime::<Utc>::from(self.creation_date),
DateTime::<Utc>::from(self.expiration_date)
)
.blue();
return format!(
"{}\n{}\n{}\n{}",
self.uuid.yellow(),
self.app_identifier.green(),
self.name,
dates
);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use expectest::expect;
use expectest::prelude::*;
#[test]
fn contains() {
let profile = Info {
uuid: "123".into(),
name: "name".into(),
app_identifier: "id".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
};
expect!(profile.contains("12")).to(be_true());
expect!(profile.contains("me")).to(be_true());
expect!(profile.contains("id")).to(be_true());
}
#[test]
fn correct_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.com.exmaple.app".to_owned();
expect!(profile.bundle_id()).to(be_some().value("com.exmaple.app"));
}
#[test]
fn incorrect_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE".to_owned();
expect!(profile.bundle_id()).to(be_none());
}
#[test]
fn wildcard_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.*".to_owned();
expect!(profile.bundle_id()).to(be_some().value("*"));
}
}
| {
return true;
} | conditional_block |
navigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NavigatorBinding;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::window::Window;
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct Navigator {
pub reflector_: Reflector //XXXjdm cycle: window->navigator->window
}
impl Navigator {
pub fn new_inherited() -> Navigator {
Navigator {
reflector_: Reflector::new()
}
}
pub fn new(window: &JSRef<Window>) -> Temporary<Navigator> {
reflect_dom_object(box Navigator::new_inherited(),
window,
NavigatorBinding::Wrap)
}
}
pub trait NavigatorMethods {
fn Product(&self) -> DOMString;
fn TaintEnabled(&self) -> bool;
fn AppName(&self) -> DOMString;
fn AppCodeName(&self) -> DOMString;
fn Platform(&self) -> DOMString;
}
impl<'a> NavigatorMethods for JSRef<'a, Navigator> {
fn Product(&self) -> DOMString {
"Gecko".to_string()
}
fn | (&self) -> bool {
false
}
fn AppName(&self) -> DOMString {
"Netscape".to_string() // Like Gecko/Webkit
}
fn AppCodeName(&self) -> DOMString {
"Mozilla".to_string()
}
fn Platform(&self) -> DOMString {
"".to_string()
}
}
impl Reflectable for Navigator {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
}
| TaintEnabled | identifier_name |
navigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NavigatorBinding;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::window::Window;
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct Navigator {
pub reflector_: Reflector //XXXjdm cycle: window->navigator->window
}
impl Navigator {
pub fn new_inherited() -> Navigator {
Navigator {
reflector_: Reflector::new()
}
}
pub fn new(window: &JSRef<Window>) -> Temporary<Navigator> {
reflect_dom_object(box Navigator::new_inherited(),
window,
NavigatorBinding::Wrap)
}
}
pub trait NavigatorMethods {
fn Product(&self) -> DOMString;
fn TaintEnabled(&self) -> bool;
fn AppName(&self) -> DOMString;
fn AppCodeName(&self) -> DOMString;
fn Platform(&self) -> DOMString;
}
impl<'a> NavigatorMethods for JSRef<'a, Navigator> {
fn Product(&self) -> DOMString {
"Gecko".to_string()
}
fn TaintEnabled(&self) -> bool {
false
}
fn AppName(&self) -> DOMString {
"Netscape".to_string() // Like Gecko/Webkit
}
fn AppCodeName(&self) -> DOMString { |
fn Platform(&self) -> DOMString {
"".to_string()
}
}
impl Reflectable for Navigator {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
} | "Mozilla".to_string()
} | random_line_split |
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn remove_pending_transfer_sender(&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await {
error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg {
ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => |
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery { .. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response);
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response, this channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event, .. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
}
}
| {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let bootstrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
} | conditional_block |
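// --- Editorial sketch (not part of the original source) ---
// update_session_info above disconnects only from elders that are missing
// from the newly received set. The same set difference in isolation, with
// placeholder strings standing in for SocketAddrs.
fn stale_peers_sketch(old: &[&'static str], new: &[&'static str]) -> Vec<&'static str> {
    use std::collections::BTreeSet;
    let keep: BTreeSet<_> = new.iter().copied().collect();
    old.iter().copied().filter(|p| !keep.contains(p)).collect()
}
#[test]
fn stale_peers_sketch_diffs_elder_sets() {
    let old = ["10.0.0.1:7000", "10.0.0.2:7000"];
    let new = ["10.0.0.2:7000", "10.0.0.3:7000"];
    assert_eq!(stale_peers_sketch(&old, &new), vec!["10.0.0.1:7000"]);
}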
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn remove_pending_transfer_sender(&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await { | ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let bootstrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
}
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery {.. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response);
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response, this channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event,.. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
}
} | error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg { | random_line_split |
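// --- Editorial sketch (not part of the original source) ---
// handle_client_msg above forwards each QueryResponse to the channel sender
// registered under its correlation id; responses for unknown ids are simply
// dropped. The same pattern in miniature with std mpsc channels (the ids and
// payloads here are invented for the example).
#[test]
fn correlation_dispatch_sketch() {
    use std::collections::HashMap;
    use std::sync::mpsc;
    let mut pending: HashMap<u64, mpsc::Sender<&'static str>> = HashMap::new();
    let (tx, rx) = mpsc::channel();
    pending.insert(42, tx);
    if let Some(sender) = pending.get(&42) {
        let _ = sender.send("query response");
    }
    assert_eq!(rx.recv().unwrap(), "query response");
}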
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn remove_pending_transfer_sender(&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await {
error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg {
ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let bootstrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
}
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery { .. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) |
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response, this channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event, .. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
}
}
| {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response); | identifier_body |
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn | (&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await {
error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg {
ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let bootstrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
}
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery { .. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response);
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response, this channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event, .. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
}
}
| remove_pending_transfer_sender | identifier_name |
issue-2502.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
struct font<'a> {
fontbuf: &'a ~[u8],
}
impl<'a> font<'a> {
pub fn buf(&self) -> &'a ~[u8] {
self.fontbuf
}
}
fn font<'r>(fontbuf: &'r ~[u8]) -> font<'r> {
font {
fontbuf: fontbuf
}
}
pub fn main() { } | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | random_line_split |
issue-2502.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct | <'a> {
fontbuf: &'a ~[u8],
}
impl<'a> font<'a> {
pub fn buf(&self) -> &'a ~[u8] {
self.fontbuf
}
}
fn font<'r>(fontbuf: &'r ~[u8]) -> font<'r> {
font {
fontbuf: fontbuf
}
}
pub fn main() { }
| font | identifier_name |
issue-2502.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct font<'a> {
fontbuf: &'a ~[u8],
}
impl<'a> font<'a> {
pub fn buf(&self) -> &'a ~[u8] {
self.fontbuf
}
}
fn font<'r>(fontbuf: &'r ~[u8]) -> font<'r> |
pub fn main() { }
| {
font {
fontbuf: fontbuf
}
} | identifier_body |
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn | () -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String {
format!("{}{}", s.len(), s)
}
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else { "Recv" };
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap();
let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
}
| get_options_parser | identifier_name |
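`mangle_nested_name` above builds an Itanium-C++-ABI-style nested name: `_ZN`, then each path component length-prefixed by `mangle_simple`, then `E`; `find_analysis` then searches for that stem as a substring of the full decorated symbol, so trailing parameter encodings still match. A worked example (the namespace, protocol, and message names are invented):

assert_eq!(mangle_simple("dom"), "3dom");
assert_eq!(
    mangle_nested_name(&["mozilla".to_string(), "dom".to_string()], "PContentParent", "SendFoo"),
    "_ZN7mozilla3dom14PContentParent7SendFooE"
);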
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn get_options_parser() -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String |
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else { "Recv" };
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap();
let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
}
| {
format!("{}{}", s.len(), s)
} | identifier_body |
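In this `identifier_body` record the masked middle is exactly the body of `mangle_simple`, the `{ format!("{}{}", s.len(), s) }` length-prefix step that the worked example above builds on.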
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn get_options_parser() -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String {
format!("{}{}", s.len(), s)
}
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else | ;
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap();
let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
}
| { "Recv" } | conditional_block |
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn get_options_parser() -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String {
format!("{}{}", s.len(), s)
}
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else { "Recv" };
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap(); | let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
} | random_line_split |
|
name.rs | #![macro_use]
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
fmt,
string::String,
};
/// An interned, freshenable identifier.
/// Generally, one creates names with `n()` (short for `Name::global()`);
/// two names created this way with the same spelling will be treated as the same name.
/// Hygiene comes from freshening (implemented in `alpha.rs`, invoked in `walk_mode.rs`).
/// If a name is created in an unusual way that might cause it to collide,
/// `Name::gensym()` ensures uniqueness.
/// Only names that were copied or cloned from the original will compare equal.
#[derive(PartialEq, Eq, Clone, Copy, Hash)]
pub struct Name {
id: usize,
}
pub struct Spelling {
// No two different variables have this the same. Tomatoes may have been added:
unique: String,
// The original spelling that the programmer chose.
orig: String,
}
thread_local! {
// From `Spelling.unique` to `id`s:
static id_map: RefCell<HashMap<String, usize>> = RefCell::new(HashMap::new());
// From `id`s to `Spelling`s
static spellings: RefCell<Vec<Spelling>> = RefCell::new(vec![]);
static printables: RefCell<HashMap<usize, String>> = RefCell::new(HashMap::new());
// The values of `printables`, for lookup purposes.
static printables_used: RefCell<HashSet<String>> = RefCell::new(HashSet::new());
// Should we do "naive" freshening for testing purposes?
static fake_freshness: RefCell<bool> = RefCell::new(false);
}
impl crate::runtime::reify::Reifiable for Name {
fn ty_name() -> Name { n("Name") }
fn reify(&self) -> crate::runtime::eval::Value { val!(ast(at * self)) }
fn reflect(v: &crate::runtime::eval::Value) -> Name {
extract!((v) crate::runtime::eval::Value::AbstractSyntax = (ref ast)
=> ast.to_name())
}
}
impl std::cmp::PartialOrd for Name {
fn partial_cmp(&self, other: &Name) -> Option<std::cmp::Ordering> {
Some(self.orig_sp().cmp(&other.orig_sp()))
}
}
impl std::cmp::Ord for Name {
fn cmp(&self, other: &Name) -> std::cmp::Ordering { self.orig_sp().cmp(&other.orig_sp()) }
}
// These are for isolating tests of alpha-equivalence from each other.
pub fn enable_fake_freshness(ff: bool) {
fake_freshness.with(|fake_freshness_| {
*fake_freshness_.borrow_mut() = ff;
})
}
// only available on nightly:
// impl !Send for Name {}
impl Name {
/// Two names that are unequal to each other will have different "spelling"s.
/// Tomatoes (🍅) may have been added to the end to ensure uniqueness.
pub fn sp(self) -> String { spellings.with(|us| us.borrow()[self.id].unique.clone()) }
/// The "original spelling" of a name; the string that was used to define it. These may collide.
pub fn orig_sp(self) -> String { spellings.with(|us| us.borrow()[self.id].orig.clone()) }
/// This extracts the "original" `Name`, prior to any freshening.
/// This is probably not ever the *right* thing to do, but may be needed as a workaround.
pub fn unhygienic_orig(self) -> Name {
spellings.with(|us| Name::new(&us.borrow()[self.id].orig, false))
}
/// Printable names are unique, like names from `sp()`, but generated lazily.
/// So, if the compiler freshens some name a bunch of times, producing a tomato-filled mess,
/// but only prints one version of the name, it gets to print an unadorned name.
/// If absolutely necessary to avoid collision, carrots (🥕) are added to the end.
pub fn print(self) -> String {
printables.with(|printables_| {
printables_used.with(|printables_used_| {
printables_
.borrow_mut()
.entry(self.id)
.or_insert_with(|| {
let mut print_version = self.orig_sp();
while printables_used_.borrow().contains(&print_version) {
// Graffiti seen at Berkeley: "EⒶT YOUR VEGETABLES 🥕"
print_version = format!("{}🥕", print_version);
}
printables_used_.borrow_mut().insert(print_version.clone());
print_version.clone()
})
.clone()
})
})
}
pub fn global(s: &str) -> Name { Name::new(s, false) }
pub fn gensym(s: &str) -> Name { Name::new(s, true) }
pub fn freshen(self) -> Name { Name::new(&self.orig_sp(), true) }
fn new(orig_spelling: &str, freshen: bool) -> Name {
let fake_freshness_ = fake_freshness.with(|ff| *ff.borrow());
id_map.with(|id_map_| {
let mut unique_spelling = orig_spelling.to_owned();
// Find a fresh version by adding tomatoes, if requested:
while freshen && id_map_.borrow().contains_key(&unique_spelling) {
unique_spelling = format!("{}🍅", unique_spelling);
}
if freshen && fake_freshness_ {
// Forget doing it right; only add exactly one tomato:
unique_spelling = format!("{}🍅", orig_spelling);
}
let claim_id = || {
spellings.with(|spellings_| {
let new_id = spellings_.borrow().len();
spellings_.borrow_mut().push(Spelling {
unique: unique_spelling.clone(),
orig: orig_spelling.to_owned(),
});
new_id
})
};
// If we're faking freshness, make the freshened name findable. Otherwise...
let id = if freshen && !fake_freshness_ {
claim_id() // ...don't put it in the table
} else {
*id_map_.borrow_mut().entry(unique_spelling.clone()).or_insert_with(claim_id)
};
Name { id: id }
})
}
pub fn is(self, s: &str) -> bool { self.sp() == s }
pub fn is_name(self, n: Name) -> bool { self.sp() == n.sp() }
}
impl From<&str> for Name {
fn from(s: &str) -> Name { Name::global(s) }
}
impl From<&String> for Name {
fn from(s: &String) -> Name { Name::global(&*s) }
}
// TODO: move to `ast_walk`
// TODO: using `lazy_static!` (with or without gensym) makes some tests fail. Why?
/// Special name for negative `ast_walk`ing
pub fn negative_ret_val() -> Name { Name::global("⋄") }
impl fmt::Debug for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "«{}»", self.sp()) }
}
impl fmt::Display for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.print()) }
}
pub fn n(s: &str) -> Name { Name::global(s) }
#[test]
fn name_interning() {
// This test fails under tarpaulin; why? It must be related to `thread_local!` somehow...
let a = n("a");
assert_eq!(a, a);
assert_eq!(a, n("a"));
assert_ne!(a, a.freshen());
assert_eq!(a, a.freshen().unhygienic_orig());
assert_ne!(a, n("x🍅"));
assert_ne!(a.freshen(), a.freshen());
assert_ne!(n("a"), n("y"));
enable_fake_freshness(true);
let x = n("x");
assert_eq!(x, x);
assert_eq!(x, n("x"));
assert_ne!(x, x.freshen());
// ... but now the freshened version of `x` is accessible (and doesn't avoid existing names)
assert_eq!(x.freshen(), n("x🍅"));
assert_eq!(x.freshen(), x.freshen());
| // Printable versions are first-come, first-served
assert_eq!(a.freshen().print(), "a");
assert_eq!(a.print(), "a🥕");
} | random_line_split |
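A short usage sketch of the interning and freshening API documented above (hypothetical caller code; the exact tomato count depends on what has already been interned):

let a = n("user");                  // interned: any later n("user") is the same Name
let f = a.freshen();                // hygienic copy, spelled "user🍅" if unclaimed
assert_eq!(a, n("user"));
assert_ne!(a, f);                   // freshened names never compare equal
assert_eq!(f.orig_sp(), "user");    // the programmer-chosen spelling survives
assert_eq!(f.unhygienic_orig(), a); // and can be folded back when needed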
|
name.rs | #![macro_use]
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
fmt,
string::String,
};
/// An interned, freshenable identifier.
/// Generally, one creates names with `n()` (short for `Name::global()`);
/// two names created this way with the same spelling will be treated as the same name.
/// Hygiene comes from freshening (implemented in `alpha.rs`, invoked in `walk_mode.rs`).
/// If a name is created in an unusual way that might cause it to collide,
/// `Name::gensym()` ensures uniqueness.
/// Only names that were copied or cloned from the original will compare equal.
#[derive(PartialEq, Eq, Clone, Copy, Hash)]
pub struct Name {
id: usize,
}
pub struct Spelling {
// No two different variables have this the same. Tomatoes may have been added:
unique: String,
// The original spelling that the programmer chose.
orig: String,
}
thread_local! {
// From `Spelling.unique` to `id`s:
static id_map: RefCell<HashMap<String, usize>> = RefCell::new(HashMap::new());
// From `id`s to `Spelling`s
static spellings: RefCell<Vec<Spelling>> = RefCell::new(vec![]);
static printables: RefCell<HashMap<usize, String>> = RefCell::new(HashMap::new());
// The values of `printables`, for lookup purposes.
static printables_used: RefCell<HashSet<String>> = RefCell::new(HashSet::new());
// Should we do "naive" freshening for testing purposes?
static fake_freshness: RefCell<bool> = RefCell::new(false);
}
impl crate::runtime::reify::Reifiable for Name {
fn ty_name() -> Name { n("Name") }
fn reify(&self) -> crate::runtime::eval::Value { val!(ast(at * self)) }
fn reflect(v: &crate::runtime::eval::Value) -> Name {
extract!((v) crate::runtime::eval::Value::AbstractSyntax = (ref ast)
=> ast.to_name())
}
}
impl std::cmp::PartialOrd for Name {
fn partial_cmp(&self, other: &Name) -> Option<std::cmp::Ordering> {
Some(self.orig_sp().cmp(&other.orig_sp()))
}
}
impl std::cmp::Ord for Name {
fn cmp(&self, other: &Name) -> std::cmp::Ordering { self.orig_sp().cmp(&other.orig_sp()) }
}
// These are for isolating tests of alpha-equivalence from each other.
pub fn enable_fake_freshness(ff: bool) {
fake_freshness.with(|fake_freshness_| {
*fake_freshness_.borrow_mut() = ff;
})
}
// only available on nightly:
// impl !Send for Name {}
impl Name {
/// Two names that are unequal to each other will have different "spelling"s.
/// Tomatoes (🍅) may have been added to the end to ensure uniqueness.
pub fn sp(self) -> String { spellings.with(|us| us.borrow()[self.id].unique.clone()) }
/// The "original spelling" of a name; the string that was used to define it. These may collide.
pub fn orig_sp(self) -> String { spellings.with(|us| us.borrow()[self.id].orig.clone()) }
/// This extracts the "original" `Name`, prior to any freshening.
/// This is probably not ever the *right* thing to do, but may be needed as a workaround.
pub fn unhygienic_orig(self) -> Name {
spellings.with(|us| Name::new(&us.borrow()[self.id].orig, false))
}
/// Printable names are unique, like names from `sp()`, but generated lazily.
/// So, if the compiler freshens some name a bunch of times, producing a tomato-filled mess,
/// but only prints one version of the name, it gets to print an unadorned name.
/// If absolutely necessary to avoid collision, carrots (🥕) are added to the end.
pub fn print(self) -> String {
printables.with(|printables_| {
printables_used.with(|printables_used_| {
printables_
.borrow_mut()
.entry(self.id)
.or_insert_with(|| {
let mut print_version = self.orig_sp();
while printables_used_.borrow().contains(&print_version) {
// Graffiti seen at Berkeley: "EⒶT YOUR VEGETABLES 🥕"
print_version = format!("{}🥕", print_version);
}
printables_used_.borrow_mut().insert(print_version.clone());
print_version.clone()
})
.clone()
})
})
}
pub fn global(s: &str) -> Name { Name::new(s, false) }
pub fn gensym(s: &str) -> Name { Name::new(s, true) }
pub fn freshen(self) -> Name { Name::new(&self.orig_sp(), true) }
fn new(orig_spelling: &str, freshen: bool) -> Name {
let fake_freshness_ = fake_freshness.with(|ff| *ff.borrow());
id_map.with(|id_map_| {
let mut unique_spelling = orig_spelling.to_owned();
// Find a fresh version by adding tomatoes, if requested:
while freshen && id_map_.borrow().contains_key(&unique_spelling) {
unique_spelling = format!("{}🍅", unique_spelling);
}
if freshen && fake_freshness_ {
// Forget doing it right; only add exactly one tomato:
unique_spelling = format!("{}🍅", orig_spelling);
}
let claim_id = || {
spellings.with(|spellings_| {
let new_id = spellings_.borrow().len();
spellings_.borrow_mut().push(Spelling {
unique: unique_spelling.clone(),
orig: orig_spelling.to_owned(),
});
new_id
})
};
// If we're faking freshness, make the freshened name findable. Otherwise...
let id = if freshen && !fake_freshness_ {
cl | *id_map_.borrow_mut().entry(unique_spelling.clone()).or_insert_with(claim_id)
};
Name { id: id }
})
}
pub fn is(self, s: &str) -> bool { self.sp() == s }
pub fn is_name(self, n: Name) -> bool { self.sp() == n.sp() }
}
impl From<&str> for Name {
fn from(s: &str) -> Name { Name::global(s) }
}
impl From<&String> for Name {
fn from(s: &String) -> Name { Name::global(&*s) }
}
// TODO: move to `ast_walk`
// TODO: using `lazy_static!` (with or without gensym) makes some tests fail. Why?
/// Special name for negative `ast_walk`ing
pub fn negative_ret_val() -> Name { Name::global("⋄") }
impl fmt::Debug for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "«{}»", self.sp()) }
}
impl fmt::Display for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.print()) }
}
pub fn n(s: &str) -> Name { Name::global(s) }
#[test]
fn name_interning() {
// This test fails under tarpaulin; why? It must be related to `thread_local!` somehow...
let a = n("a");
assert_eq!(a, a);
assert_eq!(a, n("a"));
assert_ne!(a, a.freshen());
assert_eq!(a, a.freshen().unhygienic_orig());
assert_ne!(a, n("x🍅"));
assert_ne!(a.freshen(), a.freshen());
assert_ne!(n("a"), n("y"));
enable_fake_freshness(true);
let x = n("x");
assert_eq!(x, x);
assert_eq!(x, n("x"));
assert_ne!(x, x.freshen());
// ... but now the freshened version of `x` is accessible (and doesn't avoid existing names)
assert_eq!(x.freshen(), n("x🍅"));
assert_eq!(x.freshen(), x.freshen());
// Printable versions are first-come, first-served
assert_eq!(a.freshen().print(), "a");
assert_eq!(a.print(), "a🥕");
}
| aim_id() // ...don't put it in the table
} else {
| conditional_block |
name.rs | #![macro_use]
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
fmt,
string::String,
};
/// An interned, freshenable identifier.
/// Generally, one creates names with `n()` (short for `Name::global()`);
/// two names created this way with the same spelling will be treated as the same name.
/// Hygiene comes from freshening (implemented in `alpha.rs`, invoked in `walk_mode.rs`).
/// If a name is created in an unusual way that might cause it to collide,
/// `Name::gensym()` ensures uniqueness.
/// Only names that were copied or cloned from the original will compare equal.
#[derive(PartialEq, Eq, Clone, Copy, Hash)]
pub struct Name {
id: usize,
}
pub struct Spelling {
// No two different variables have this the same. Tomatoes may have been added:
unique: String,
// The original spelling that the programmer chose.
orig: String,
}
thread_local! {
// From `Spelling.unique` to `id`s:
static id_map: RefCell<HashMap<String, usize>> = RefCell::new(HashMap::new());
// From `id`s to `Spelling`s
static spellings: RefCell<Vec<Spelling>> = RefCell::new(vec![]);
static printables: RefCell<HashMap<usize, String>> = RefCell::new(HashMap::new());
// The values of `printables`, for lookup purposes.
static printables_used: RefCell<HashSet<String>> = RefCell::new(HashSet::new());
// Should we do "naive" freshening for testing purposes?
static fake_freshness: RefCell<bool> = RefCell::new(false);
}
impl crate::runtime::reify::Reifiable for Name {
fn ty_name() -> Name { n("Name") }
fn reify(&self) -> crate::runtime::eval::Value { val!(ast(at * self)) }
fn reflect(v: &crate::runtime::eval::Value) -> Name {
extract!((v) crate::runtime::eval::Value::AbstractSyntax = (ref ast)
=> ast.to_name())
}
}
impl std::cmp::PartialOrd for Name {
fn partial_cmp(&self, other: &Name) -> Option<std::cmp::Ordering> {
Some(self.orig_sp().cmp(&other.orig_sp()))
}
}
impl std::cmp::Ord for Name {
fn cmp(&self, other: &Name) -> std::cmp::Ordering { self.orig_sp().cmp(&other.orig_sp()) }
}
// These are for isolating tests of alpha-equivalence from each other.
pub fn enable_fake_freshness(ff: bool) {
fake_freshness.with(|fake_freshness_| {
*fake_freshness_.borrow_mut() = ff;
})
}
// only available on nightly:
// impl !Send for Name {}
impl Name {
/// Two names that are unequal to each other will have different "spelling"s.
/// Tomatoes (🍅) may have been added to the end to ensure uniqueness.
pub fn sp(self) -> String { spellings.with(|us| us.borrow()[self.id].unique.clone()) }
/// The "original spelling" of a name; the string that was used to define it. These may collide.
pub fn orig_sp(self) -> String { spellings.with(|us| us.borrow()[self.id].orig.clone()) }
/// This extracts the "original" `Name`, prior to any freshening.
/// This is probably not ever the *right* thing to do, but may be needed as a workaround.
pub fn unhygienic_orig(self) -> Name {
spellings.with(|us| Name::new(&us.borrow()[self.id].orig, false))
}
/// Printable names are unique, like names from `sp()`, but generated lazily.
/// So, if the compiler freshens some name a bunch of times, producing a tomato-filled mess,
/// but only prints one version of the name, it gets to print an unadorned name.
/// If absolutely necessary to avoid collision, carrots (🥕) are added to the end.
pub fn print(self) -> String {
printables.with(|printables_| {
printables_used.with(|printables_used_| {
printables_
.borrow_mut()
.entry(self.id)
.or_insert_with(|| {
let mut print_version = self.orig_sp();
while printables_used_.borrow().contains(&print_version) {
// Graffiti seen at Berkeley: "EⒶT YOUR VEGETABLES 🥕"
print_version = format!("{}🥕", print_version);
}
printables_used_.borrow_mut().insert(print_version.clone());
print_version.clone()
})
.clone()
})
})
}
pub fn global(s: &str) -> Name { Name::new(s, false) }
pub fn gensym(s: &str | ame { Name::new(s, true) }
pub fn freshen(self) -> Name { Name::new(&self.orig_sp(), true) }
fn new(orig_spelling: &str, freshen: bool) -> Name {
let fake_freshness_ = fake_freshness.with(|ff| *ff.borrow());
id_map.with(|id_map_| {
let mut unique_spelling = orig_spelling.to_owned();
// Find a fresh version by adding tomatoes, if requested:
while freshen && id_map_.borrow().contains_key(&unique_spelling) {
unique_spelling = format!("{}🍅", unique_spelling);
}
if freshen && fake_freshness_ {
// Forget doing it right; only add exactly one tomato:
unique_spelling = format!("{}🍅", orig_spelling);
}
let claim_id = || {
spellings.with(|spellings_| {
let new_id = spellings_.borrow().len();
spellings_.borrow_mut().push(Spelling {
unique: unique_spelling.clone(),
orig: orig_spelling.to_owned(),
});
new_id
})
};
// If we're faking freshness, make the freshened name findable. Otherwise...
let id = if freshen && !fake_freshness_ {
claim_id() // ...don't put it in the table
} else {
*id_map_.borrow_mut().entry(unique_spelling.clone()).or_insert_with(claim_id)
};
Name { id: id }
})
}
pub fn is(self, s: &str) -> bool { self.sp() == s }
pub fn is_name(self, n: Name) -> bool { self.sp() == n.sp() }
}
impl From<&str> for Name {
fn from(s: &str) -> Name { Name::global(s) }
}
impl From<&String> for Name {
fn from(s: &String) -> Name { Name::global(&*s) }
}
// TODO: move to `ast_walk`
// TODO: using `lazy_static!` (with or without gensym) makes some tests fail. Why?
/// Special name for negative `ast_walk`ing
pub fn negative_ret_val() -> Name { Name::global("⋄") }
impl fmt::Debug for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "«{}»", self.sp()) }
}
impl fmt::Display for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.print()) }
}
pub fn n(s: &str) -> Name { Name::global(s) }
#[test]
fn name_interning() {
// This test fails under tarpaulin; why? It must be related to `thread_local!` somehow...
let a = n("a");
assert_eq!(a, a);
assert_eq!(a, n("a"));
assert_ne!(a, a.freshen());
assert_eq!(a, a.freshen().unhygienic_orig());
assert_ne!(a, n("x🍅"));
assert_ne!(a.freshen(), a.freshen());
assert_ne!(n("a"), n("y"));
enable_fake_freshness(true);
let x = n("x");
assert_eq!(x, x);
assert_eq!(x, n("x"));
assert_ne!(x, x.freshen());
// ... but now the freshened version of `x` is accessible (and doesn't avoid existing names)
assert_eq!(x.freshen(), n("x🍅"));
assert_eq!(x.freshen(), x.freshen());
// Printable versions are first-come, first-served
assert_eq!(a.freshen().print(), "a");
assert_eq!(a.print(), "a🥕");
}
| ) -> N | identifier_name |
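One representation note for the three `name.rs` records above: `Name` is a `Copy` handle, just a `usize` index into the thread-local `spellings` table, which is why the commented-out nightly `impl !Send` would be the honest signature; a `Name` moved to another thread would leave its spelling tables behind.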
sparc64_unknown_linux_gnu.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "v9".to_string();
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "sparc64-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
arch: "sparc64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
} | // option. This file may not be copied, modified, or distributed
// except according to those terms.
use spec::{LinkerFlavor, Target, TargetResult};
| random_line_split |
sparc64_unknown_linux_gnu.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use spec::{LinkerFlavor, Target, TargetResult};
pub fn | () -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "v9".to_string();
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "sparc64-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
arch: "sparc64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
}
| target | identifier_name |
sparc64_unknown_linux_gnu.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use spec::{LinkerFlavor, Target, TargetResult};
pub fn target() -> TargetResult | {
let mut base = super::linux_base::opts();
base.cpu = "v9".to_string();
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "sparc64-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
arch: "sparc64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
} | identifier_body |
|
mod.rs | // Generated with ./mk_vsl_tag from Varnish headers: include/tbl/vsl_tags.h include/tbl/vsl_tags_http.h include/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/vapi/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags_http.h
mod tag_e;
pub mod message;
pub mod parser;
use std::fmt::{self, Debug, Display};
use quick_error::ResultExt;
use nom;
use quick_error::quick_error;
use bitflags::bitflags;
use crate::maybe_string::MaybeStr;
pub use self::tag_e::VSL_tag_e as VslRecordTag;
bitflags! {
pub struct Marker: u8 {
const VSL_CLIENTMARKER = 0b0000_0001;
const VSL_BACKENDMARKER = 0b0000_0010;
}
}
impl Display for Marker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "[{}{}]",
if self.contains(Marker::VSL_CLIENTMARKER) { "C" } else { " " },
if self.contains(Marker::VSL_BACKENDMARKER) { "B" } else { " " })
}
}
pub type VslIdent = u32;
#[derive(Debug)]
struct VslRecordHeader {
tag: u8,
len: u16,
marker: Marker,
ident: VslIdent,
}
pub struct VslRecord<'b> {
pub tag: VslRecordTag,
pub marker: Marker,
pub ident: VslIdent,
pub data: &'b[u8],
}
quick_error! {
#[derive(Debug)]
pub enum VslRecordParseError {
Nom(nom_err: String, tag: VslRecordTag, record: String) {
context(record: &'a VslRecord<'a>, err: nom::Err<&'a [u8]>) -> (format!("{}", err), record.tag, format!("{}", record))
display("Nom parser failed on {}: {}", record, nom_err)
}
}
}
impl<'b> VslRecord<'b> {
pub fn parse_data<T, P>(&'b self, parser: P) -> Result<T, VslRecordParseError> where
P: Fn(&'b [u8]) -> nom::IResult<&'b [u8], T> {
// Note: need type annotation for the u32 error type as the output IResult has no Error
// variant that would help to infer it
let result: nom::IResult<_, Result<T, _>, u32> = opt_res!(self.data, complete!(parser));
// unwrap here is safe, as complete! eliminates the Incomplete variant and opt_res! the remaining Error variant
result.unwrap().1.context(self).map_err(From::from)
}
pub fn is_client(&self) -> bool {
self.marker.contains(Marker::VSL_CLIENTMARKER)
}
pub fn is_backend(&self) -> bool {
self.marker.contains(Marker::VSL_BACKENDMARKER)
}
}
impl<'b> Debug for VslRecord<'b> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("VSL Record")
.field("tag", &self.tag)
.field("marker", &self.marker)
.field("ident", &self.ident)
.field("data", &MaybeStr::from_bytes(self.data))
.finish()
}
}
impl<'b> Display for VslRecord<'b> {
fn | (&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
let tag = format!("{:?}", self.tag);
if f.alternate() {
write!(f, "{} {:5} {:18} {}", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
} else {
write!(f, "VSL record (marker: {} ident: {} tag: {} data: {:?})", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
}
}
}
| fmt | identifier_name |
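The `Display` impl for `Marker` above renders the client and backend bits positionally, which a couple of hand-built checks make concrete:

assert_eq!(format!("{}", Marker::VSL_CLIENTMARKER | Marker::VSL_BACKENDMARKER), "[CB]");
assert_eq!(format!("{}", Marker::VSL_CLIENTMARKER), "[C ]");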
mod.rs | // Generated with ./mk_vsl_tag from Varnish headers: include/tbl/vsl_tags.h include/tbl/vsl_tags_http.h include/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/vapi/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags_http.h
mod tag_e;
pub mod message;
pub mod parser;
use std::fmt::{self, Debug, Display};
use quick_error::ResultExt;
use nom;
use quick_error::quick_error;
use bitflags::bitflags;
use crate::maybe_string::MaybeStr;
pub use self::tag_e::VSL_tag_e as VslRecordTag;
bitflags! {
pub struct Marker: u8 {
const VSL_CLIENTMARKER = 0b0000_0001;
const VSL_BACKENDMARKER = 0b0000_0010;
}
}
impl Display for Marker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "[{}{}]",
if self.contains(Marker::VSL_CLIENTMARKER) { "C" } else { " " },
if self.contains(Marker::VSL_BACKENDMARKER) { "B" } else { " " })
}
}
pub type VslIdent = u32;
#[derive(Debug)]
struct VslRecordHeader {
tag: u8,
len: u16,
marker: Marker,
ident: VslIdent,
}
pub struct VslRecord<'b> {
pub tag: VslRecordTag,
pub marker: Marker,
pub ident: VslIdent,
pub data: &'b[u8],
}
quick_error! {
#[derive(Debug)]
pub enum VslRecordParseError {
Nom(nom_err: String, tag: VslRecordTag, record: String) {
context(record: &'a VslRecord<'a>, err: nom::Err<&'a [u8]>) -> (format!("{}", err), record.tag, format!("{}", record))
display("Nom parser failed on {}: {}", record, nom_err)
}
}
}
impl<'b> VslRecord<'b> {
pub fn parse_data<T, P>(&'b self, parser: P) -> Result<T, VslRecordParseError> where
P: Fn(&'b [u8]) -> nom::IResult<&'b [u8], T> {
// Note: need type annotation for the u32 error type as the output IResult has no Error
// variant that would help to infer it
let result: nom::IResult<_, Result<T, _>, u32> = opt_res!(self.data, complete!(parser));
// unwrap here is safe, as complete! eliminates the Incomplete variant and opt_res! the remaining Error variant
result.unwrap().1.context(self).map_err(From::from)
}
pub fn is_client(&self) -> bool {
self.marker.contains(Marker::VSL_CLIENTMARKER)
}
pub fn is_backend(&self) -> bool {
self.marker.contains(Marker::VSL_BACKENDMARKER)
}
}
impl<'b> Debug for VslRecord<'b> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("VSL Record")
.field("tag", &self.tag)
.field("marker", &self.marker)
.field("ident", &self.ident)
.field("data", &MaybeStr::from_bytes(self.data))
.finish()
}
}
impl<'b> Display for VslRecord<'b> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
let tag = format!("{:?}", self.tag);
if f.alternate() | else {
write!(f, "VSL record (marker: {} ident: {} tag: {} data: {:?})", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
}
}
}
| {
write!(f, "{} {:5} {:18} {}", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
} | conditional_block |
mod.rs | // Generated with ./mk_vsl_tag from Varnish headers: include/tbl/vsl_tags.h include/tbl/vsl_tags_http.h include/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/vapi/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags_http.h
mod tag_e;
pub mod message;
pub mod parser;
use std::fmt::{self, Debug, Display};
use quick_error::ResultExt;
use nom;
use quick_error::quick_error;
use bitflags::bitflags;
use crate::maybe_string::MaybeStr;
pub use self::tag_e::VSL_tag_e as VslRecordTag;
bitflags! {
pub struct Marker: u8 {
const VSL_CLIENTMARKER = 0b0000_0001;
const VSL_BACKENDMARKER = 0b0000_0010;
}
}
impl Display for Marker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "[{}{}]",
if self.contains(Marker::VSL_CLIENTMARKER) { "C" } else { " " },
if self.contains(Marker::VSL_BACKENDMARKER) { "B" } else { " " })
}
}
pub type VslIdent = u32;
#[derive(Debug)]
struct VslRecordHeader {
tag: u8,
len: u16,
marker: Marker,
ident: VslIdent,
}
pub struct VslRecord<'b> {
pub tag: VslRecordTag,
pub marker: Marker,
pub ident: VslIdent,
pub data: &'b[u8],
}
quick_error! {
#[derive(Debug)]
pub enum VslRecordParseError {
Nom(nom_err: String, tag: VslRecordTag, record: String) {
context(record: &'a VslRecord<'a>, err: nom::Err<&'a [u8]>) -> (format!("{}", err), record.tag, format!("{}", record))
display("Nom parser failed on {}: {}", record, nom_err)
}
}
}
impl<'b> VslRecord<'b> {
pub fn parse_data<T, P>(&'b self, parser: P) -> Result<T, VslRecordParseError> where
P: Fn(&'b [u8]) -> nom::IResult<&'b [u8], T> {
// Note: need type annotation for the u32 error type as the output IResult has no Error
// variant that would help to infer it
let result: nom::IResult<_, Result<T, _>, u32> = opt_res!(self.data, complete!(parser));
// unwrap here is safe, as complete! eliminates the Incomplete variant and opt_res! the remaining Error variant
result.unwrap().1.context(self).map_err(From::from)
}
pub fn is_client(&self) -> bool {
self.marker.contains(Marker::VSL_CLIENTMARKER) |
pub fn is_backend(&self) -> bool {
self.marker.contains(Marker::VSL_BACKENDMARKER)
}
}
impl<'b> Debug for VslRecord<'b> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("VSL Record")
.field("tag", &self.tag)
.field("marker", &self.marker)
.field("ident", &self.ident)
.field("data", &MaybeStr::from_bytes(self.data))
.finish()
}
}
impl<'b> Display for VslRecord<'b> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
let tag = format!("{:?}", self.tag);
if f.alternate() {
write!(f, "{} {:5} {:18} {}", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
} else {
write!(f, "VSL record (marker: {} ident: {} tag: {} data: {:?})", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
}
}
} | } | random_line_split |
mod.rs | // Generated with ./mk_vsl_tag from Varnish headers: include/tbl/vsl_tags.h include/tbl/vsl_tags_http.h include/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/vapi/vsl_int.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags.h
// https://github.com/varnishcache/varnish-cache/blob/master/include/tbl/vsl_tags_http.h
mod tag_e;
pub mod message;
pub mod parser;
use std::fmt::{self, Debug, Display};
use quick_error::ResultExt;
use nom;
use quick_error::quick_error;
use bitflags::bitflags;
use crate::maybe_string::MaybeStr;
pub use self::tag_e::VSL_tag_e as VslRecordTag;
bitflags! {
pub struct Marker: u8 {
const VSL_CLIENTMARKER = 0b0000_0001;
const VSL_BACKENDMARKER = 0b0000_0010;
}
}
impl Display for Marker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "[{}{}]",
if self.contains(Marker::VSL_CLIENTMARKER) { "C" } else { " " },
if self.contains(Marker::VSL_BACKENDMARKER) { "B" } else { " " })
}
}
pub type VslIdent = u32;
#[derive(Debug)]
struct VslRecordHeader {
tag: u8,
len: u16,
marker: Marker,
ident: VslIdent,
}
pub struct VslRecord<'b> {
pub tag: VslRecordTag,
pub marker: Marker,
pub ident: VslIdent,
pub data: &'b[u8],
}
quick_error! {
#[derive(Debug)]
pub enum VslRecordParseError {
Nom(nom_err: String, tag: VslRecordTag, record: String) {
context(record: &'a VslRecord<'a>, err: nom::Err<&'a [u8]>) -> (format!("{}", err), record.tag, format!("{}", record))
display("Nom parser failed on {}: {}", record, nom_err)
}
}
}
impl<'b> VslRecord<'b> {
pub fn parse_data<T, P>(&'b self, parser: P) -> Result<T, VslRecordParseError> where
P: Fn(&'b [u8]) -> nom::IResult<&'b [u8], T> {
// Note: need type annotation for the u32 error type as the output IResult has no Error
// variant that would help to infer it
let result: nom::IResult<_, Result<T, _>, u32> = opt_res!(self.data, complete!(parser));
// unwrap here is safe, as complete! eliminates the Incomplete variant and opt_res! the remaining Error variant
result.unwrap().1.context(self).map_err(From::from)
}
pub fn is_client(&self) -> bool {
self.marker.contains(Marker::VSL_CLIENTMARKER)
}
pub fn is_backend(&self) -> bool {
self.marker.contains(Marker::VSL_BACKENDMARKER)
}
}
impl<'b> Debug for VslRecord<'b> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> |
}
impl<'b> Display for VslRecord<'b> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
let tag = format!("{:?}", self.tag);
if f.alternate() {
write!(f, "{} {:5} {:18} {}", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
} else {
write!(f, "VSL record (marker: {} ident: {} tag: {} data: {:?})", self.marker, self.ident, tag, MaybeStr::from_bytes(self.data))
}
}
}
| {
f.debug_struct("VSL Record")
.field("tag", &self.tag)
.field("marker", &self.marker)
.field("ident", &self.ident)
.field("data", &MaybeStr::from_bytes(self.data))
.finish()
} | identifier_body |
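A hedged sketch of driving `parse_data` with a nom parser, assuming the nom-3-era `IResult::Done` API that this module's `nom::Err<&[u8]>` error type implies; the parser, record, and tag name below are hand-built stand-ins for illustration:

fn take_all(input: &[u8]) -> nom::IResult<&[u8], Vec<u8>> {
    nom::IResult::Done(&input[input.len()..], input.to_vec())
}

let record = VslRecord {
    tag: VslRecordTag::SLT_Debug, // variant name assumed from the generated enum
    marker: Marker::VSL_CLIENTMARKER,
    ident: 42,
    data: b"payload",
};
let bytes = record.parse_data(take_all).expect("VSL payload should parse");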
network.rs | 10;
#[derive(Debug)]
struct ErrorAction {
retry_timeout: Duration,
max_retries: usize,
description: String,
}
impl ErrorAction {
fn new(config: &NetworkConfiguration, description: String) -> Self {
Self {
retry_timeout: Duration::from_millis(config.tcp_connect_retry_timeout),
max_retries: config.tcp_connect_max_retries as usize,
description,
}
}
}
impl ErrorHandler<io::Error> for ErrorAction {
type OutError = io::Error;
fn handle(&mut self, attempt: usize, e: io::Error) -> RetryPolicy<io::Error> {
log::trace!(
"{} failed [Attempt: {}/{}]: {}",
self.description,
attempt,
self.max_retries,
e
);
if attempt >= self.max_retries {
RetryPolicy::ForwardError(e)
} else {
let jitter = thread_rng().gen_range(0.5, 1.0);
let timeout = self.retry_timeout.mul_f64(jitter);
RetryPolicy::WaitRetry(timeout)
}
}
}
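// A concrete reading of the retry policy above (numbers illustrative): with
// tcp_connect_retry_timeout = 1000 ms, every attempt sleeps a uniformly
// jittered 500-1000 ms, constant rather than exponential backoff, until
// tcp_connect_max_retries attempts have failed and the error is forwarded.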
#[derive(Debug, Clone)]
pub enum ConnectedPeerAddr {
In(SocketAddr),
Out(String, SocketAddr),
}
impl ConnectedPeerAddr {
pub fn is_incoming(&self) -> bool {
match self {
Self::In(_) => true,
Self::Out(_, _) => false,
}
}
}
/// Network events.
#[derive(Debug)]
pub enum NetworkEvent {
/// A message was received from the network.
MessageReceived(Vec<u8>),
/// The node has connected to a peer.
PeerConnected {
/// Peer address.
addr: ConnectedPeerAddr,
/// Connect message.
connect: Box<Verified<Connect>>,
},
/// The node has disconnected from a peer.
PeerDisconnected(PublicKey),
/// Connection to a peer failed.
UnableConnectToPeer(PublicKey),
}
#[derive(Debug, Clone)]
pub enum NetworkRequest {
SendMessage(PublicKey, SignedMessage),
#[cfg(test)]
DisconnectWithPeer(PublicKey),
}
#[derive(Debug)]
pub struct NetworkPart {
pub our_connect_message: Verified<Connect>,
pub listen_address: SocketAddr,
pub network_config: NetworkConfiguration,
pub max_message_len: u32,
pub network_requests: mpsc::Receiver<NetworkRequest>,
pub network_tx: mpsc::Sender<NetworkEvent>,
pub(crate) connect_list: SharedConnectList,
}
#[derive(Clone, Debug)]
struct ConnectionPoolEntry {
sender: mpsc::Sender<SignedMessage>,
address: ConnectedPeerAddr,
// Connection ID assigned to the connection during instantiation. This ID is unique among
// all connections and is used in `ConnectionPool::remove()` to decide whether a removal
// request is still relevant or has been obsoleted by a newer connection.
id: u64,
}
#[derive(Clone, Debug)]
struct SharedConnectionPool {
inner: Arc<RwLock<ConnectionPool>>,
}
impl SharedConnectionPool {
fn new(our_key: PublicKey) -> Self {
Self {
inner: Arc::new(RwLock::new(ConnectionPool::new(our_key))),
}
}
fn read(&self) -> impl ops::Deref<Target = ConnectionPool> + '_ {
self.inner.read().unwrap()
}
fn write(&self) -> impl ops::DerefMut<Target = ConnectionPool> + '_ {
self.inner.write().unwrap()
}
async fn send_message(&self, peer_key: &PublicKey, message: SignedMessage) {
let maybe_peer_info = {
// Ensure that we don't hold the lock across the `await` point.
let peers = &self.inner.read().unwrap().peers;
peers
.get(peer_key)
.map(|peer| (peer.sender.clone(), peer.id))
};
if let Some((mut sender, connection_id)) = maybe_peer_info {
if sender.send(message).await.is_err() {
log::warn!("Cannot send message to peer {}", peer_key);
self.write().remove(peer_key, Some(connection_id));
}
}
}
fn create_connection(
&self,
peer_key: PublicKey,
address: ConnectedPeerAddr,
socket: Framed<TcpStream, MessagesCodec>,
) -> Option<Connection> {
let mut guard = self.write();
if guard.contains(&peer_key) && Self::ignore_connection(guard.our_key, peer_key) {
log::info!("Ignoring connection to {:?} per priority rules", peer_key);
return None;
}
let (receiver_rx, connection_id) = guard.add(peer_key, address.clone());
Some(Connection {
socket,
receiver_rx,
address,
key: peer_key,
id: connection_id,
})
}
/// Provides a complete, anti-symmetric relation between the two peers bound in a connection.
/// Both peers use it to decide which of the two connections is kept alive
/// when they connect to each other simultaneously.
fn ignore_connection(our_key: PublicKey, their_key: PublicKey) -> bool {
our_key[..] < their_key[..]
}
}
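// Example (added): a property sketch for `ignore_connection`. For two distinct keys the
// relation holds in exactly one direction, so when two peers dial each other at the same
// time, precisely one of the duplicate connections survives. Assumes an exonum-style
// `gen_keypair()` is available (the exact import path may differ).
#[cfg(test)]
mod ignore_connection_property {
    use super::*;
    use exonum::crypto::gen_keypair;

    #[test]
    fn exactly_one_side_ignores() {
        let (a, _) = gen_keypair();
        let (b, _) = gen_keypair();
        assert_ne!(a, b); // holds with overwhelming probability
        // Anti-symmetry: the relation cannot hold in both directions at once.
        assert_ne!(
            SharedConnectionPool::ignore_connection(a, b),
            SharedConnectionPool::ignore_connection(b, a)
        );
    }
}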
#[derive(Debug)]
struct ConnectionPool {
peers: HashMap<PublicKey, ConnectionPoolEntry>,
our_key: PublicKey,
next_connection_id: u64,
}
impl ConnectionPool {
fn new(our_key: PublicKey) -> Self {
Self {
peers: HashMap::new(),
our_key,
next_connection_id: 0,
}
}
fn count_incoming(&self) -> usize {
self.peers
.values()
.filter(|entry| entry.address.is_incoming())
.count()
}
fn count_outgoing(&self) -> usize {
self.peers
.values()
.filter(|entry| !entry.address.is_incoming())
.count()
}
/// Adds a peer to the connection list.
///
/// # Return value
///
/// Returns the receiver for outgoing messages to the peer and the connection ID.
fn add(
&mut self,
key: PublicKey,
address: ConnectedPeerAddr,
) -> (mpsc::Receiver<SignedMessage>, u64) {
let id = self.next_connection_id;
let (sender, receiver_rx) = mpsc::channel(OUTGOING_CHANNEL_SIZE);
let entry = ConnectionPoolEntry {
sender,
address,
id,
};
self.next_connection_id += 1;
self.peers.insert(key, entry);
(receiver_rx, id)
}
fn contains(&self, address: &PublicKey) -> bool {
self.peers.get(address).is_some()
}
/// Drops the connection to a peer. The request can be optionally filtered by the connection ID
/// in order to avoid issuing obsolete requests.
///
/// # Return value
///
/// Returns `true` if the connection with the peer was dropped. If the connection with the
/// peer was not dropped (either because it did not exist, or because
/// the provided `connection_id` is outdated), returns `false`.
fn remove(&mut self, address: &PublicKey, connection_id: Option<u64>) -> bool {
if let Some(entry) = self.peers.get(address) {
if connection_id.map_or(true, |id| id == entry.id) {
self.peers.remove(address);
return true;
}
}
false
}
}
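// Example (added): the connection-ID filtering in `ConnectionPool::remove`. A removal
// request that carries the ID of a connection which has since been replaced is ignored,
// while a request with the current ID succeeds. The key helper and address value are
// illustrative; any `PublicKey` works.
#[cfg(test)]
mod stale_removal_example {
    use super::*;
    use exonum::crypto::gen_keypair;

    #[test]
    fn outdated_request_is_ignored() {
        let mut pool = ConnectionPool::new(gen_keypair().0);
        let peer = gen_keypair().0;
        let addr = ConnectedPeerAddr::In("127.0.0.1:2000".parse().unwrap());

        let (_old_rx, old_id) = pool.add(peer, addr.clone());
        let (_new_rx, new_id) = pool.add(peer, addr); // a reconnect replaces the entry

        assert!(!pool.remove(&peer, Some(old_id))); // obsolete request: no-op
        assert!(pool.remove(&peer, Some(new_id))); // current request: entry is dropped
    }
}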
struct Connection {
socket: Framed<TcpStream, MessagesCodec>,
receiver_rx: mpsc::Receiver<SignedMessage>,
address: ConnectedPeerAddr,
key: PublicKey,
id: u64,
}
#[derive(Clone)]
struct NetworkHandler {
listen_address: SocketAddr,
pool: SharedConnectionPool,
network_config: NetworkConfiguration,
network_tx: mpsc::Sender<NetworkEvent>,
handshake_params: HandshakeParams,
connect_list: SharedConnectList,
}
impl NetworkHandler {
fn new(
address: SocketAddr,
connection_pool: SharedConnectionPool,
network_config: NetworkConfiguration,
network_tx: mpsc::Sender<NetworkEvent>,
handshake_params: HandshakeParams,
connect_list: SharedConnectList,
) -> Self {
Self {
listen_address: address,
pool: connection_pool,
network_config,
network_tx,
handshake_params,
connect_list,
}
}
async fn listener(self) -> anyhow::Result<()> {
let mut listener = TcpListener::bind(&self.listen_address).await?;
let mut incoming_connections = listener.incoming();
// Incoming connections limiter
let incoming_connections_limit = self.network_config.max_incoming_connections;
while let Some(mut socket) = incoming_connections.try_next().await? {
let peer_address = match socket.peer_addr() {
Ok(address) => address,
Err(err) => {
log::warn!("Peer address resolution failed: {}", err);
continue;
}
};
// Check incoming connections count.
let connections_count = self.pool.read().count_incoming();
if connections_count >= incoming_connections_limit {
log::warn!(
"Rejected incoming connection with peer={}, connections limit reached.",
peer_address
);
continue;
}
let pool = self.pool.clone();
let connect_list = self.connect_list.clone();
let network_tx = self.network_tx.clone();
let handshake = NoiseHandshake::responder(&self.handshake_params);
let task = async move {
let HandshakeData {
codec,
raw_message,
peer_key,
} = handshake.listen(&mut socket).await?;
let connect = Self::parse_connect_msg(raw_message, &peer_key)?;
let peer_key = connect.author();
if !connect_list.is_peer_allowed(&peer_key) {
bail!(
"Rejecting incoming connection with peer={} public_key={}, \
the peer is not in the connect list",
peer_address,
peer_key
);
}
let conn_addr = ConnectedPeerAddr::In(peer_address);
let socket = Framed::new(socket, codec);
let maybe_connection = pool.create_connection(peer_key, conn_addr, socket);
if let Some(connection) = maybe_connection {
Self::handle_connection(connection, connect, pool, network_tx).await
} else {
Ok(())
}
};
tokio::spawn(task.unwrap_or_else(|err| log::warn!("{}", err)));
}
Ok(())
}
/// # Return value
///
/// The returned future resolves when the connection is established. The connection processing
/// is spawned onto the `tokio` runtime.
fn connect(
&self,
key: PublicKey,
handshake_params: &HandshakeParams,
) -> impl Future<Output = anyhow::Result<()>> {
// Resolve peer key to an address.
let maybe_address = self.connect_list.find_address_by_key(&key);
let unresolved_address = if let Some(address) = maybe_address {
address
} else {
let err = format_err!("Attempted to connect to peer {} which is not in the connect list", key);
return future::err(err).left_future();
};
let max_connections = self.network_config.max_outgoing_connections;
let mut handshake_params = handshake_params.clone();
handshake_params.set_remote_key(key);
let pool = self.pool.clone();
let network_tx = self.network_tx.clone();
let network_config = self.network_config;
let description = format!(
"Connecting to {} (remote address = {})",
key, unresolved_address
);
let on_error = ErrorAction::new(&network_config, description);
async move {
let connect = || TcpStream::connect(&unresolved_address);
// The second component in returned value / error is the number of retries,
// which we ignore.
let (mut socket, _) = FutureRetry::new(connect, on_error)
.await
.map_err(|(err, _)| err)?;
let peer_address = match socket.peer_addr() {
Ok(addr) => addr,
Err(err) => {
let err = format_err!("Couldn't take peer addr from socket: {}", err);
return Err(err);
}
};
Self::configure_socket(&mut socket, network_config)?;
let HandshakeData {
codec,
raw_message,
peer_key,
} = NoiseHandshake::initiator(&handshake_params)
.send(&mut socket)
.await?;
if pool.read().count_outgoing() >= max_connections {
log::info!(
"Ignoring outgoing connection to {:?} because the connection limit ({}) \
is reached",
key,
max_connections
);
return Ok(());
}
let conn_addr = ConnectedPeerAddr::Out(unresolved_address, peer_address);
let connect = Self::parse_connect_msg(raw_message, &peer_key)?;
let socket = Framed::new(socket, codec);
if let Some(connection) = pool.create_connection(key, conn_addr, socket) |
Ok(())
}
.right_future()
}
async fn process_messages(
pool: SharedConnectionPool,
connection: Connection,
mut network_tx: mpsc::Sender<NetworkEvent>,
) {
let (sink, stream) = connection.socket.split();
let key = connection.key;
let connection_id = connection.id;
// Processing of incoming messages.
let incoming = async move {
let res = (&mut network_tx)
.sink_map_err(anyhow::Error::from)
.send_all(&mut stream.map_ok(NetworkEvent::MessageReceived))
.await;
if pool.write().remove(&key, Some(connection_id)) {
network_tx
.send(NetworkEvent::PeerDisconnected(key))
.await
.ok();
}
res
};
futures::pin_mut!(incoming);
// Processing of outgoing messages.
let outgoing = connection.receiver_rx.map(Ok).forward(sink);
// Select the first future to terminate and drop the remaining one.
let task = future::select(incoming, outgoing).map(|res| {
if let (Err(err), _) = res.factor_first() {
log::info!(
"Connection with peer {} terminated: {} (root cause: {})",
key,
err,
err.root_cause()
);
}
});
task.await
}
fn configure_socket(
socket: &mut TcpStream,
network_config: NetworkConfiguration,
) -> anyhow::Result<()> {
socket.set_nodelay(network_config.tcp_nodelay)?;
let duration = network_config.tcp_keep_alive.map(Duration::from_millis);
socket.set_keepalive(duration)?;
Ok(())
}
async fn handle_connection(
connection: Connection,
connect: Verified<Connect>,
pool: SharedConnectionPool,
mut network_tx: mpsc::Sender<NetworkEvent>,
) -> anyhow::Result<()> {
let address = connection.address.clone();
log::trace!("Established connection with peer {:?}", address);
Self::send_peer_connected_event(address, connect, &mut network_tx).await?;
Self::process_messages(pool, connection, network_tx).await;
Ok(())
}
fn parse_connect_msg(
raw: Vec<u8>,
key: &x25519::PublicKey,
) -> anyhow::Result<Verified<Connect>> {
let message = Message::from_raw_buffer(raw)?;
let connect: Verified<Connect> = match message {
Message::Service(Service::Connect(connect)) => connect,
other => bail!(
"First message from a remote peer is not `Connect`, got={:?}",
other
),
};
let author = into_x25519_public_key(connect.author());
ensure!(
author == *key,
"Connect message public key doesn't match with the received peer key"
);
Ok(connect)
}
pub async fn handle_requests(self, mut receiver: mpsc::Receiver<NetworkRequest>) {
while let Some(request) = receiver.next().await {
match request {
NetworkRequest::SendMessage(key, message) => {
let mut this = self.clone();
tokio::spawn(async move {
if let Err(e) = this.handle_send_message(key, message).await {
log::error!("Cannot send message to peer {:?}: {}", key, e);
}
});
}
#[cfg(test)]
NetworkRequest::DisconnectWithPeer(peer) => {
let disconnected = self.pool.write().remove(&peer, None);
if disconnected {
let mut network_tx = self.network_tx.clone();
tokio::spawn(async move {
network_tx
.send(NetworkEvent::PeerDisconnected(peer))
.await
.ok();
});
}
}
}
}
}
async fn handle_send_message(
&mut self,
address: PublicKey,
message: SignedMessage,
) -> anyhow::Result<()> {
if self.pool.read().contains(&address) {
self.pool.send_message(&address, message).await;
Ok(())
} else if self. | {
let handler = Self::handle_connection(connection, connect, pool, network_tx);
tokio::spawn(handler);
} | conditional_block |
network.rs | 10;
#[derive(Debug)]
struct ErrorAction {
retry_timeout: Duration,
max_retries: usize,
description: String,
}
impl ErrorAction {
fn new(config: &NetworkConfiguration, description: String) -> Self {
Self {
retry_timeout: Duration::from_millis(config.tcp_connect_retry_timeout),
max_retries: config.tcp_connect_max_retries as usize,
description,
}
}
}
impl ErrorHandler<io::Error> for ErrorAction {
type OutError = io::Error;
fn handle(&mut self, attempt: usize, e: io::Error) -> RetryPolicy<io::Error> {
log::trace!(
"{} failed [Attempt: {}/{}]: {}",
self.description,
attempt,
self.max_retries,
e
);
if attempt >= self.max_retries {
RetryPolicy::ForwardError(e)
} else {
let jitter = thread_rng().gen_range(0.5, 1.0);
let timeout = self.retry_timeout.mul_f64(jitter);
RetryPolicy::WaitRetry(timeout)
}
}
}
#[derive(Debug, Clone)]
pub enum ConnectedPeerAddr {
In(SocketAddr),
Out(String, SocketAddr),
}
impl ConnectedPeerAddr {
pub fn is_incoming(&self) -> bool {
match self {
Self::In(_) => true,
Self::Out(_, _) => false,
}
}
}
/// Network events.
#[derive(Debug)]
pub enum NetworkEvent {
/// A message was received from the network.
MessageReceived(Vec<u8>),
/// The node has connected to a peer.
PeerConnected {
/// Peer address.
addr: ConnectedPeerAddr,
/// Connect message.
connect: Box<Verified<Connect>>,
},
/// The node has disconnected from a peer.
PeerDisconnected(PublicKey),
/// Connection to a peer failed.
UnableConnectToPeer(PublicKey),
}
#[derive(Debug, Clone)]
pub enum NetworkRequest {
SendMessage(PublicKey, SignedMessage),
#[cfg(test)]
DisconnectWithPeer(PublicKey),
}
#[derive(Debug)]
pub struct NetworkPart {
pub our_connect_message: Verified<Connect>,
pub listen_address: SocketAddr,
pub network_config: NetworkConfiguration,
pub max_message_len: u32,
pub network_requests: mpsc::Receiver<NetworkRequest>,
pub network_tx: mpsc::Sender<NetworkEvent>,
pub(crate) connect_list: SharedConnectList,
}
#[derive(Clone, Debug)]
struct ConnectionPoolEntry {
sender: mpsc::Sender<SignedMessage>,
address: ConnectedPeerAddr,
// Connection ID assigned to the connection during instantiation. This ID is unique among
// all connections and is used in `ConnectionPool::remove()` to decide whether a removal
// request is still relevant or has been obsoleted by a newer connection.
id: u64,
}
#[derive(Clone, Debug)]
struct SharedConnectionPool {
inner: Arc<RwLock<ConnectionPool>>,
}
impl SharedConnectionPool {
fn new(our_key: PublicKey) -> Self {
Self {
inner: Arc::new(RwLock::new(ConnectionPool::new(our_key))),
}
}
fn read(&self) -> impl ops::Deref<Target = ConnectionPool> + '_ {
self.inner.read().unwrap()
}
fn write(&self) -> impl ops::DerefMut<Target = ConnectionPool> + '_ {
self.inner.write().unwrap()
}
async fn send_message(&self, peer_key: &PublicKey, message: SignedMessage) {
let maybe_peer_info = {
// Ensure that we don't hold the lock across the `await` point.
let peers = &self.inner.read().unwrap().peers;
peers
.get(peer_key)
.map(|peer| (peer.sender.clone(), peer.id))
};
if let Some((mut sender, connection_id)) = maybe_peer_info {
if sender.send(message).await.is_err() {
log::warn!("Cannot send message to peer {}", peer_key);
self.write().remove(peer_key, Some(connection_id));
}
}
}
fn create_connection(
&self,
peer_key: PublicKey,
address: ConnectedPeerAddr,
socket: Framed<TcpStream, MessagesCodec>,
) -> Option<Connection> {
let mut guard = self.write();
if guard.contains(&peer_key) && Self::ignore_connection(guard.our_key, peer_key) {
log::info!("Ignoring connection to {:?} per priority rules", peer_key);
return None;
}
let (receiver_rx, connection_id) = guard.add(peer_key, address.clone());
Some(Connection {
socket,
receiver_rx,
address,
key: peer_key,
id: connection_id,
})
}
/// Provides a complete, anti-symmetric relation between the two peers bound in a connection.
/// Both peers use it to decide which of the two connections is kept alive
/// when they connect to each other simultaneously.
fn ignore_connection(our_key: PublicKey, their_key: PublicKey) -> bool {
our_key[..] < their_key[..]
}
}
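// Example (added): the lock-scoping pattern from `send_message`, in isolation. A
// `std::sync` read guard must not be held across an `.await` point, so the data needed
// later is cloned inside a short synchronous block and the guard is dropped before any
// asynchronous work starts. `clone_then_await_sketch` is an illustrative helper.
#[cfg(test)]
async fn clone_then_await_sketch(pool: &SharedConnectionPool, key: PublicKey) {
    let maybe_sender = {
        let guard = pool.read();
        guard.peers.get(&key).map(|entry| entry.sender.clone())
    }; // <- guard dropped here, before the await below
    if let Some(mut sender) = maybe_sender {
        sender.close().await.ok(); // awaiting on the cloned handle is now safe
    }
}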
#[derive(Debug)]
struct ConnectionPool {
peers: HashMap<PublicKey, ConnectionPoolEntry>,
our_key: PublicKey,
next_connection_id: u64,
}
impl ConnectionPool {
fn new(our_key: PublicKey) -> Self {
Self {
peers: HashMap::new(),
our_key,
next_connection_id: 0,
}
}
fn count_incoming(&self) -> usize {
self.peers
.values()
.filter(|entry| entry.address.is_incoming())
.count()
}
fn count_outgoing(&self) -> usize {
self.peers
.values()
.filter(|entry| !entry.address.is_incoming())
.count()
}
/// Adds a peer to the connection list.
///
/// # Return value
///
/// Returns the receiver for outgoing messages to the peer and the connection ID.
fn add(
&mut self,
key: PublicKey,
address: ConnectedPeerAddr,
) -> (mpsc::Receiver<SignedMessage>, u64) {
let id = self.next_connection_id;
let (sender, receiver_rx) = mpsc::channel(OUTGOING_CHANNEL_SIZE);
let entry = ConnectionPoolEntry {
sender,
address,
id,
};
self.next_connection_id += 1;
self.peers.insert(key, entry);
(receiver_rx, id)
}
fn contains(&self, address: &PublicKey) -> bool {
self.peers.get(address).is_some()
}
/// Drops the connection to a peer. The request can be optionally filtered by the connection ID
/// in order to avoid issuing obsolete requests.
///
/// # Return value
///
/// Returns `true` if the connection with the peer was dropped. If the connection with the
/// peer was not dropped (either because it did not exist, or because
/// the provided `connection_id` is outdated), returns `false`.
fn remove(&mut self, address: &PublicKey, connection_id: Option<u64>) -> bool {
if let Some(entry) = self.peers.get(address) {
if connection_id.map_or(true, |id| id == entry.id) {
self.peers.remove(address);
return true;
}
}
false
}
}
struct Connection {
socket: Framed<TcpStream, MessagesCodec>,
receiver_rx: mpsc::Receiver<SignedMessage>,
address: ConnectedPeerAddr,
key: PublicKey,
id: u64,
}
#[derive(Clone)]
struct NetworkHandler {
listen_address: SocketAddr,
pool: SharedConnectionPool,
network_config: NetworkConfiguration,
network_tx: mpsc::Sender<NetworkEvent>,
handshake_params: HandshakeParams,
connect_list: SharedConnectList,
}
impl NetworkHandler {
fn new(
address: SocketAddr,
connection_pool: SharedConnectionPool,
network_config: NetworkConfiguration,
network_tx: mpsc::Sender<NetworkEvent>,
handshake_params: HandshakeParams,
connect_list: SharedConnectList,
) -> Self {
Self {
listen_address: address,
pool: connection_pool,
network_config,
network_tx,
handshake_params,
connect_list,
}
}
async fn listener(self) -> anyhow::Result<()> {
let mut listener = TcpListener::bind(&self.listen_address).await?;
let mut incoming_connections = listener.incoming();
// Incoming connections limiter
let incoming_connections_limit = self.network_config.max_incoming_connections;
while let Some(mut socket) = incoming_connections.try_next().await? {
let peer_address = match socket.peer_addr() {
Ok(address) => address,
Err(err) => {
log::warn!("Peer address resolution failed: {}", err);
continue;
}
};
// Check incoming connections count.
let connections_count = self.pool.read().count_incoming();
if connections_count >= incoming_connections_limit {
log::warn!(
"Rejected incoming connection with peer={}, connections limit reached.",
peer_address
);
continue;
}
let pool = self.pool.clone();
let connect_list = self.connect_list.clone();
let network_tx = self.network_tx.clone();
let handshake = NoiseHandshake::responder(&self.handshake_params);
let task = async move {
let HandshakeData {
codec,
raw_message,
peer_key,
} = handshake.listen(&mut socket).await?;
let connect = Self::parse_connect_msg(raw_message, &peer_key)?;
let peer_key = connect.author();
if !connect_list.is_peer_allowed(&peer_key) {
bail!(
"Rejecting incoming connection with peer={} public_key={}, \
the peer is not in the connect list",
peer_address,
peer_key
);
}
let conn_addr = ConnectedPeerAddr::In(peer_address);
let socket = Framed::new(socket, codec);
let maybe_connection = pool.create_connection(peer_key, conn_addr, socket);
if let Some(connection) = maybe_connection {
Self::handle_connection(connection, connect, pool, network_tx).await
} else {
Ok(())
}
};
tokio::spawn(task.unwrap_or_else(|err| log::warn!("{}", err)));
}
Ok(())
}
/// # Return value
///
/// The returned future resolves when the connection is established. The connection processing
/// is spawned onto the `tokio` runtime.
fn | (
&self,
key: PublicKey,
handshake_params: &HandshakeParams,
) -> impl Future<Output = anyhow::Result<()>> {
// Resolve peer key to an address.
let maybe_address = self.connect_list.find_address_by_key(&key);
let unresolved_address = if let Some(address) = maybe_address {
address
} else {
let err = format_err!("Attempted to connect to peer {} which is not in the connect list", key);
return future::err(err).left_future();
};
let max_connections = self.network_config.max_outgoing_connections;
let mut handshake_params = handshake_params.clone();
handshake_params.set_remote_key(key);
let pool = self.pool.clone();
let network_tx = self.network_tx.clone();
let network_config = self.network_config;
let description = format!(
"Connecting to {} (remote address = {})",
key, unresolved_address
);
let on_error = ErrorAction::new(&network_config, description);
async move {
let connect = || TcpStream::connect(&unresolved_address);
// The second component in returned value / error is the number of retries,
// which we ignore.
let (mut socket, _) = FutureRetry::new(connect, on_error)
.await
.map_err(|(err, _)| err)?;
let peer_address = match socket.peer_addr() {
Ok(addr) => addr,
Err(err) => {
let err = format_err!("Couldn't take peer addr from socket: {}", err);
return Err(err);
}
};
Self::configure_socket(&mut socket, network_config)?;
let HandshakeData {
codec,
raw_message,
peer_key,
} = NoiseHandshake::initiator(&handshake_params)
.send(&mut socket)
.await?;
if pool.read().count_outgoing() >= max_connections {
log::info!(
"Ignoring outgoing connection to {:?} because the connection limit ({}) \
is reached",
key,
max_connections
);
return Ok(());
}
let conn_addr = ConnectedPeerAddr::Out(unresolved_address, peer_address);
let connect = Self::parse_connect_msg(raw_message, &peer_key)?;
let socket = Framed::new(socket, codec);
if let Some(connection) = pool.create_connection(key, conn_addr, socket) {
let handler = Self::handle_connection(connection, connect, pool, network_tx);
tokio::spawn(handler);
}
Ok(())
}
.right_future()
}
async fn process_messages(
pool: SharedConnectionPool,
connection: Connection,
mut network_tx: mpsc::Sender<NetworkEvent>,
) {
let (sink, stream) = connection.socket.split();
let key = connection.key;
let connection_id = connection.id;
// Processing of incoming messages.
let incoming = async move {
let res = (&mut network_tx)
.sink_map_err(anyhow::Error::from)
.send_all(&mut stream.map_ok(NetworkEvent::MessageReceived))
.await;
if pool.write().remove(&key, Some(connection_id)) {
network_tx
.send(NetworkEvent::PeerDisconnected(key))
.await
.ok();
}
res
};
futures::pin_mut!(incoming);
// Processing of outgoing messages.
let outgoing = connection.receiver_rx.map(Ok).forward(sink);
// Select the first future to terminate and drop the remaining one.
let task = future::select(incoming, outgoing).map(|res| {
if let (Err(err), _) = res.factor_first() {
log::info!(
"Connection with peer {} terminated: {} (root cause: {})",
key,
err,
err.root_cause()
);
}
});
task.await
}
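// Example (added): the teardown pattern in `process_messages`, reduced to its core. Two
// futures drive one connection; `future::select` resolves as soon as either completes,
// and the losing future is dropped, closing its half of the connection. The ready/pending
// futures below are stand-ins for the real socket halves.
#[cfg(test)]
async fn select_teardown_sketch() {
    let incoming = future::ready(Ok::<(), anyhow::Error>(()));
    let outgoing = future::pending::<Result<(), anyhow::Error>>();
    futures::pin_mut!(incoming, outgoing);
    match future::select(incoming, outgoing).await {
        future::Either::Left((res, _dropped_outgoing)) => assert!(res.is_ok()),
        future::Either::Right(_) => unreachable!("`outgoing` never resolves"),
    }
}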
fn configure_socket(
socket: &mut TcpStream,
network_config: NetworkConfiguration,
) -> anyhow::Result<()> {
socket.set_nodelay(network_config.tcp_nodelay)?;
let duration = network_config.tcp_keep_alive.map(Duration::from_millis);
socket.set_keepalive(duration)?;
Ok(())
}
async fn handle_connection(
connection: Connection,
connect: Verified<Connect>,
pool: SharedConnectionPool,
mut network_tx: mpsc::Sender<NetworkEvent>,
) -> anyhow::Result<()> {
let address = connection.address.clone();
log::trace!("Established connection with peer {:?}", address);
Self::send_peer_connected_event(address, connect, &mut network_tx).await?;
Self::process_messages(pool, connection, network_tx).await;
Ok(())
}
fn parse_connect_msg(
raw: Vec<u8>,
key: &x25519::PublicKey,
) -> anyhow::Result<Verified<Connect>> {
let message = Message::from_raw_buffer(raw)?;
let connect: Verified<Connect> = match message {
Message::Service(Service::Connect(connect)) => connect,
other => bail!(
"First message from a remote peer is not `Connect`, got={:?}",
other
),
};
let author = into_x25519_public_key(connect.author());
ensure!(
author == *key,
"Connect message public key doesn't match with the received peer key"
);
Ok(connect)
}
pub async fn handle_requests(self, mut receiver: mpsc::Receiver<NetworkRequest>) {
while let Some(request) = receiver.next().await {
match request {
NetworkRequest::SendMessage(key, message) => {
let mut this = self.clone();
tokio::spawn(async move {
if let Err(e) = this.handle_send_message(key, message).await {
log::error!("Cannot send message to peer {:?}: {}", key, e);
}
});
}
#[cfg(test)]
NetworkRequest::DisconnectWithPeer(peer) => {
let disconnected = self.pool.write().remove(&peer, None);
if disconnected {
let mut network_tx = self.network_tx.clone();
tokio::spawn(async move {
network_tx
.send(NetworkEvent::PeerDisconnected(peer))
.await
.ok();
});
}
}
}
}
}
async fn handle_send_message(
&mut self,
address: PublicKey,
message: SignedMessage,
) -> anyhow::Result<()> {
if self.pool.read().contains(&address) {
self.pool.send_message(&address, message).await;
Ok(())
} else if self. | connect | identifier_name |
network.rs | = 10;
#[derive(Debug)]
struct ErrorAction {
retry_timeout: Duration,
max_retries: usize,
description: String,
}
impl ErrorAction {
fn new(config: &NetworkConfiguration, description: String) -> Self {
Self {
retry_timeout: Duration::from_millis(config.tcp_connect_retry_timeout),
max_retries: config.tcp_connect_max_retries as usize,
description,
}
}
}
impl ErrorHandler<io::Error> for ErrorAction {
type OutError = io::Error;
fn handle(&mut self, attempt: usize, e: io::Error) -> RetryPolicy<io::Error> {
log::trace!(
"{} failed [Attempt: {}/{}]: {}",
self.description,
attempt,
self.max_retries,
e
);
if attempt >= self.max_retries {
RetryPolicy::ForwardError(e)
} else {
let jitter = thread_rng().gen_range(0.5, 1.0);
let timeout = self.retry_timeout.mul_f64(jitter);
RetryPolicy::WaitRetry(timeout)
}
}
}
#[derive(Debug, Clone)]
pub enum ConnectedPeerAddr {
In(SocketAddr),
Out(String, SocketAddr),
}
impl ConnectedPeerAddr {
pub fn is_incoming(&self) -> bool {
match self {
Self::In(_) => true,
Self::Out(_, _) => false,
}
}
}
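// Example (added): the two address flavors in practice. An incoming connection only knows
// the remote socket address, while an outgoing one also keeps the unresolved address it
// dialed, which stays meaningful if DNS later resolves to a different socket address.
// The values are illustrative.
#[cfg(test)]
fn connected_peer_addr_examples() -> (ConnectedPeerAddr, ConnectedPeerAddr) {
    let incoming = ConnectedPeerAddr::In("10.0.0.1:7000".parse().unwrap());
    let outgoing = ConnectedPeerAddr::Out(
        "node-1.example.com:7000".to_owned(),
        "10.0.0.2:7000".parse().unwrap(),
    );
    assert!(incoming.is_incoming());
    assert!(!outgoing.is_incoming());
    (incoming, outgoing)
}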
/// Network events.
#[derive(Debug)]
pub enum NetworkEvent {
/// A message was received from the network.
MessageReceived(Vec<u8>),
/// The node has connected to a peer.
PeerConnected {
/// Peer address.
addr: ConnectedPeerAddr,
/// Connect message.
connect: Box<Verified<Connect>>,
},
/// The node has disconnected from a peer.
PeerDisconnected(PublicKey),
/// Connection to a peer failed.
UnableConnectToPeer(PublicKey),
}
#[derive(Debug, Clone)]
pub enum NetworkRequest {
SendMessage(PublicKey, SignedMessage),
#[cfg(test)]
DisconnectWithPeer(PublicKey),
}
#[derive(Debug)]
pub struct NetworkPart {
pub our_connect_message: Verified<Connect>,
pub listen_address: SocketAddr,
pub network_config: NetworkConfiguration,
pub max_message_len: u32,
pub network_requests: mpsc::Receiver<NetworkRequest>,
pub network_tx: mpsc::Sender<NetworkEvent>,
pub(crate) connect_list: SharedConnectList,
}
#[derive(Clone, Debug)]
struct ConnectionPoolEntry {
sender: mpsc::Sender<SignedMessage>,
address: ConnectedPeerAddr,
// Connection ID assigned to the connection during instantiation. This ID is unique among
// all connections and is used in `ConnectionPool::remove()` to decide whether a removal
// request is still relevant or has been obsoleted by a newer connection.
id: u64,
}
#[derive(Clone, Debug)]
struct SharedConnectionPool {
inner: Arc<RwLock<ConnectionPool>>,
}
impl SharedConnectionPool {
fn new(our_key: PublicKey) -> Self {
Self {
inner: Arc::new(RwLock::new(ConnectionPool::new(our_key))),
}
}
fn read(&self) -> impl ops::Deref<Target = ConnectionPool> + '_ {
self.inner.read().unwrap()
}
fn write(&self) -> impl ops::DerefMut<Target = ConnectionPool> + '_ {
self.inner.write().unwrap()
}
async fn send_message(&self, peer_key: &PublicKey, message: SignedMessage) {
let maybe_peer_info = {
// Ensure that we don't hold the lock across the `await` point.
let peers = &self.inner.read().unwrap().peers;
peers
.get(peer_key)
.map(|peer| (peer.sender.clone(), peer.id))
};
if let Some((mut sender, connection_id)) = maybe_peer_info {
if sender.send(message).await.is_err() {
log::warn!("Cannot send message to peer {}", peer_key);
self.write().remove(peer_key, Some(connection_id));
}
}
}
fn create_connection(
&self,
peer_key: PublicKey,
address: ConnectedPeerAddr,
socket: Framed<TcpStream, MessagesCodec>,
) -> Option<Connection> {
let mut guard = self.write();
if guard.contains(&peer_key) && Self::ignore_connection(guard.our_key, peer_key) {
log::info!("Ignoring connection to {:?} per priority rules", peer_key);
return None;
}
let (receiver_rx, connection_id) = guard.add(peer_key, address.clone());
Some(Connection {
socket,
receiver_rx,
address,
key: peer_key,
id: connection_id,
})
}
/// Provides a complete, anti-symmetric relation between the two peers bound in a connection.
/// Both peers use it to decide which of the two connections is kept alive
/// when they connect to each other simultaneously.
fn ignore_connection(our_key: PublicKey, their_key: PublicKey) -> bool {
our_key[..] < their_key[..]
}
}
#[derive(Debug)]
struct ConnectionPool {
peers: HashMap<PublicKey, ConnectionPoolEntry>,
our_key: PublicKey,
next_connection_id: u64,
}
impl ConnectionPool {
fn new(our_key: PublicKey) -> Self {
Self {
peers: HashMap::new(),
our_key,
next_connection_id: 0,
}
}
fn count_incoming(&self) -> usize {
self.peers
.values()
.filter(|entry| entry.address.is_incoming())
.count()
}
fn count_outgoing(&self) -> usize {
self.peers
.values()
.filter(|entry| !entry.address.is_incoming())
.count()
}
/// Adds a peer to the connection list.
///
/// # Return value
///
/// Returns the receiver for outgoing messages to the peer and the connection ID.
fn add(
&mut self,
key: PublicKey,
address: ConnectedPeerAddr,
) -> (mpsc::Receiver<SignedMessage>, u64) {
let id = self.next_connection_id;
let (sender, receiver_rx) = mpsc::channel(OUTGOING_CHANNEL_SIZE);
let entry = ConnectionPoolEntry {
sender,
address,
id,
};
self.next_connection_id += 1;
self.peers.insert(key, entry);
(receiver_rx, id)
}
fn contains(&self, address: &PublicKey) -> bool {
self.peers.get(address).is_some()
}
/// Drops the connection to a peer. The request can be optionally filtered by the connection ID
/// in order to avoid issuing obsolete requests.
///
/// # Return value
///
/// Returns `true` if the connection with the peer was dropped. If the connection with the
/// peer was not dropped (either because it did not exist, or because
/// the provided `connection_id` is outdated), returns `false`.
fn remove(&mut self, address: &PublicKey, connection_id: Option<u64>) -> bool {
if let Some(entry) = self.peers.get(address) {
if connection_id.map_or(true, |id| id == entry.id) {
self.peers.remove(address);
return true;
}
}
false
}
}
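// Example (added): why `add` hands back the receiving end of a *bounded* channel. With a
// small capacity, a peer whose connection stalls eventually exerts backpressure on the
// sender instead of buffering messages without limit. Plain integers stand in for
// `SignedMessage`; note that a `futures` channel reserves one extra slot per sender.
#[cfg(test)]
mod bounded_channel_example {
    use futures::{channel::mpsc, executor::block_on, StreamExt};

    #[test]
    fn sender_is_throttled_once_capacity_is_reached() {
        let (mut tx, mut rx) = mpsc::channel::<u32>(2);
        assert!(tx.try_send(1).is_ok());
        assert!(tx.try_send(2).is_ok());
        assert!(tx.try_send(3).is_ok()); // the per-sender reserve slot
        assert!(tx.try_send(4).is_err()); // full: the sender is now throttled
        assert_eq!(block_on(rx.next()), Some(1));
        assert!(tx.try_send(4).is_ok()); // draining one message frees capacity
    }
}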
struct Connection {
socket: Framed<TcpStream, MessagesCodec>,
receiver_rx: mpsc::Receiver<SignedMessage>,
address: ConnectedPeerAddr,
key: PublicKey,
id: u64,
}
#[derive(Clone)]
struct NetworkHandler {
listen_address: SocketAddr,
pool: SharedConnectionPool,
network_config: NetworkConfiguration,
network_tx: mpsc::Sender<NetworkEvent>,
handshake_params: HandshakeParams,
connect_list: SharedConnectList,
}
impl NetworkHandler {
fn new(
address: SocketAddr,
connection_pool: SharedConnectionPool,
network_config: NetworkConfiguration,
network_tx: mpsc::Sender<NetworkEvent>,
handshake_params: HandshakeParams,
connect_list: SharedConnectList,
) -> Self {
Self {
listen_address: address,
pool: connection_pool,
network_config,
network_tx,
handshake_params,
connect_list,
}
}
async fn listener(self) -> anyhow::Result<()> {
let mut listener = TcpListener::bind(&self.listen_address).await?;
let mut incoming_connections = listener.incoming();
// Incoming connections limiter
let incoming_connections_limit = self.network_config.max_incoming_connections;
while let Some(mut socket) = incoming_connections.try_next().await? {
let peer_address = match socket.peer_addr() {
Ok(address) => address,
Err(err) => {
log::warn!("Peer address resolution failed: {}", err);
continue;
}
};
// Check incoming connections count.
let connections_count = self.pool.read().count_incoming();
if connections_count >= incoming_connections_limit {
log::warn!(
"Rejected incoming connection with peer={}, connections limit reached.",
peer_address
);
continue;
}
let pool = self.pool.clone();
let connect_list = self.connect_list.clone();
let network_tx = self.network_tx.clone();
let handshake = NoiseHandshake::responder(&self.handshake_params);
let task = async move {
let HandshakeData {
codec,
raw_message,
peer_key,
} = handshake.listen(&mut socket).await?;
let connect = Self::parse_connect_msg(raw_message, &peer_key)?;
let peer_key = connect.author();
if !connect_list.is_peer_allowed(&peer_key) {
bail!(
"Rejecting incoming connection with peer={} public_key={}, \
the peer is not in the connect list",
peer_address,
peer_key
);
}
let conn_addr = ConnectedPeerAddr::In(peer_address);
let socket = Framed::new(socket, codec);
let maybe_connection = pool.create_connection(peer_key, conn_addr, socket);
if let Some(connection) = maybe_connection {
Self::handle_connection(connection, connect, pool, network_tx).await
} else {
Ok(())
}
};
tokio::spawn(task.unwrap_or_else(|err| log::warn!("{}", err)));
}
Ok(())
}
/// # Return value
///
/// The returned future resolves when the connection is established. The connection processing
/// is spawned onto the `tokio` runtime.
fn connect(
&self,
key: PublicKey,
handshake_params: &HandshakeParams,
) -> impl Future<Output = anyhow::Result<()>> {
// Resolve peer key to an address.
let maybe_address = self.connect_list.find_address_by_key(&key);
let unresolved_address = if let Some(address) = maybe_address {
address
} else {
let err = format_err!("Attempted to connect to peer {} which is not in the connect list", key);
return future::err(err).left_future();
};
let max_connections = self.network_config.max_outgoing_connections;
let mut handshake_params = handshake_params.clone();
handshake_params.set_remote_key(key);
let pool = self.pool.clone();
let network_tx = self.network_tx.clone();
let network_config = self.network_config;
let description = format!(
"Connecting to {} (remote address = {})",
key, unresolved_address
);
let on_error = ErrorAction::new(&network_config, description);
async move {
let connect = || TcpStream::connect(&unresolved_address);
// The second component in returned value / error is the number of retries,
// which we ignore.
let (mut socket, _) = FutureRetry::new(connect, on_error)
.await
.map_err(|(err, _)| err)?;
let peer_address = match socket.peer_addr() {
Ok(addr) => addr,
Err(err) => {
let err = format_err!("Couldn't take peer addr from socket: {}", err);
return Err(err);
}
};
Self::configure_socket(&mut socket, network_config)?;
let HandshakeData {
codec,
raw_message,
peer_key,
} = NoiseHandshake::initiator(&handshake_params) | "Ignoring outgoing connection to {:?} because the connection limit ({}) \
is reached",
key,
max_connections
);
return Ok(());
}
let conn_addr = ConnectedPeerAddr::Out(unresolved_address, peer_address);
let connect = Self::parse_connect_msg(raw_message, &peer_key)?;
let socket = Framed::new(socket, codec);
if let Some(connection) = pool.create_connection(key, conn_addr, socket) {
let handler = Self::handle_connection(connection, connect, pool, network_tx);
tokio::spawn(handler);
}
Ok(())
}
.right_future()
}
async fn process_messages(
pool: SharedConnectionPool,
connection: Connection,
mut network_tx: mpsc::Sender<NetworkEvent>,
) {
let (sink, stream) = connection.socket.split();
let key = connection.key;
let connection_id = connection.id;
// Processing of incoming messages.
let incoming = async move {
let res = (&mut network_tx)
.sink_map_err(anyhow::Error::from)
.send_all(&mut stream.map_ok(NetworkEvent::MessageReceived))
.await;
if pool.write().remove(&key, Some(connection_id)) {
network_tx
.send(NetworkEvent::PeerDisconnected(key))
.await
.ok();
}
res
};
futures::pin_mut!(incoming);
// Processing of outgoing messages.
let outgoing = connection.receiver_rx.map(Ok).forward(sink);
// Select the first future to terminate and drop the remaining one.
let task = future::select(incoming, outgoing).map(|res| {
if let (Err(err), _) = res.factor_first() {
log::info!(
"Connection with peer {} terminated: {} (root cause: {})",
key,
err,
err.root_cause()
);
}
});
task.await
}
fn configure_socket(
socket: &mut TcpStream,
network_config: NetworkConfiguration,
) -> anyhow::Result<()> {
socket.set_nodelay(network_config.tcp_nodelay)?;
let duration = network_config.tcp_keep_alive.map(Duration::from_millis);
socket.set_keepalive(duration)?;
Ok(())
}
async fn handle_connection(
connection: Connection,
connect: Verified<Connect>,
pool: SharedConnectionPool,
mut network_tx: mpsc::Sender<NetworkEvent>,
) -> anyhow::Result<()> {
let address = connection.address.clone();
log::trace!("Established connection with peer {:?}", address);
Self::send_peer_connected_event(address, connect, &mut network_tx).await?;
Self::process_messages(pool, connection, network_tx).await;
Ok(())
}
fn parse_connect_msg(
raw: Vec<u8>,
key: &x25519::PublicKey,
) -> anyhow::Result<Verified<Connect>> {
let message = Message::from_raw_buffer(raw)?;
let connect: Verified<Connect> = match message {
Message::Service(Service::Connect(connect)) => connect,
other => bail!(
"First message from a remote peer is not `Connect`, got={:?}",
other
),
};
let author = into_x25519_public_key(connect.author());
ensure!(
author == *key,
"Connect message public key doesn't match with the received peer key"
);
Ok(connect)
}
pub async fn handle_requests(self, mut receiver: mpsc::Receiver<NetworkRequest>) {
while let Some(request) = receiver.next().await {
match request {
NetworkRequest::SendMessage(key, message) => {
let mut this = self.clone();
tokio::spawn(async move {
if let Err(e) = this.handle_send_message(key, message).await {
log::error!("Cannot send message to peer {:?}: {}", key, e);
}
});
}
#[cfg(test)]
NetworkRequest::DisconnectWithPeer(peer) => {
let disconnected = self.pool.write().remove(&peer, None);
if disconnected {
let mut network_tx = self.network_tx.clone();
tokio::spawn(async move {
network_tx
.send(NetworkEvent::PeerDisconnected(peer))
.await
.ok();
});
}
}
}
}
}
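// Example (added): the dispatch pattern of `handle_requests`, isolated. Every request is
// served on its own task, so one slow or unreachable peer cannot stall the request loop;
// failures are logged instead of propagated. The `u32` payload is a stand-in for a real
// request type.
#[cfg(test)]
async fn spawn_per_request_sketch(mut receiver: mpsc::Receiver<u32>) {
    while let Some(request) = receiver.next().await {
        tokio::spawn(async move {
            // Stand-in for `handle_send_message`; errors would only be logged here.
            log::trace!("processed request {}", request);
        });
    }
}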
async fn handle_send_message(
&mut self,
address: PublicKey,
message: SignedMessage,
) -> anyhow::Result<()> {
if self.pool.read().contains(&address) {
self.pool.send_message(&address, message).await;
Ok(())
} else if self.can_ | .send(&mut socket)
.await?;
if pool.read().count_outgoing() >= max_connections {
log::info!( | random_line_split |