Dataset columns and value statistics:

Column     Dtype         Value statistics
file_name  large_string  lengths 4–69
prefix     large_string  lengths 0–26.7k
suffix     large_string  lengths 0–24.8k
middle     large_string  lengths 0–2.12k
fim_type   large_string  4 classes
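Each row below splits one source file into a fill-in-the-middle (FIM) triple: concatenating prefix, middle, and suffix in that order reproduces the original file text, and fim_type records which kind of span was masked (identifier_name, identifier_body, conditional_block, or random_line_split). The sketch below illustrates that layout under those assumed column semantics; the FimRow struct, its field comments, and the reassemble helper are hypothetical names for illustration, not part of the dataset's tooling.

```rust
// Minimal sketch of the assumed row layout; `FimRow` and `reassemble`
// are hypothetical names, not part of any dataset tooling.
struct FimRow {
    file_name: String, // source file the sample was cut from
    prefix: String,    // text before the masked span
    suffix: String,    // text after the masked span
    middle: String,    // the masked span a model must fill in
    fim_type: String,  // how the span was chosen, e.g. "identifier_body"
}

/// Concatenating prefix + middle + suffix reproduces the original file text.
fn reassemble(row: &FimRow) -> String {
    let mut out =
        String::with_capacity(row.prefix.len() + row.middle.len() + row.suffix.len());
    out.push_str(&row.prefix);
    out.push_str(&row.middle);
    out.push_str(&row.suffix);
    out
}

fn main() {
    // Toy values echoing the first row below (insertion.rs, fim_type
    // "identifier_body", where the masked middle is a function body).
    let row = FimRow {
        file_name: "insertion.rs".to_string(),
        prefix: "fn loci(&self) -> &Self::Loci { ".to_string(),
        suffix: " // rest of the file ...".to_string(),
        middle: "&self.locus }".to_string(),
        fim_type: "identifier_body".to_string(),
    };
    println!("{} [{}]: {}", row.file_name, row.fim_type, reassemble(&row));
}
```

The row values that follow are reproduced verbatim, including their original whitespace normalization.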
insertion.rs
// Copyright 2020 Johannes Köster. // Licensed under the GNU GPLv3 license (https://opensource.org/licenses/GPL-3.0) // This file may not be copied, modified, or distributed // except according to those terms. use std::cell::RefCell; use std::cmp; use std::rc::Rc; use std::sync::Arc; use anyhow::Result; use bio::stats::pairhmm::EmissionParameters; use bio::stats::LogProb; use bio_types::genome::{self, AbstractInterval, AbstractLocus}; use crate::estimation::alignment_properties::AlignmentProperties; use crate::reference; use crate::variants::evidence::realignment::pairhmm::{ReadEmission, RefBaseEmission}; use crate::variants::evidence::realignment::{Realignable, Realigner}; use crate::variants::sampling_bias::{ReadSamplingBias, SamplingBias}; use crate::variants::types::{AlleleSupport, MultiLocus, PairedEndEvidence, SingleLocus, Variant}; use crate::{default_emission, default_ref_base_emission}; pub(crate) struct Insertion<R: Realigner> { locus: MultiLocus, ins_seq: Rc<Vec<u8>>, realigner: RefCell<R>, } impl<R: Realigner> Insertion<R> { pub(crate) fn new(locus: genome::Locus, ins_seq: Vec<u8>, realigner: R) -> Self { Insertion { locus: MultiLocus::new(vec![SingleLocus::new(genome::Interval::new( locus.contig().to_owned(), locus.pos()..locus.pos() + 1, ))]), ins_seq: Rc::new(ins_seq), realigner: RefCell::new(realigner), } } pub(crate) fn locus(&self) -> &SingleLocus { &self.locus[0] } } impl<'a, R: Realigner> Realignable<'a> for Insertion<R> { type EmissionParams = InsertionEmissionParams<'a>; fn alt_emission_params( &self, read_emission_params: Rc<ReadEmission<'a>>, ref_buffer: Arc<reference::Buffer>, _: &genome::Interval, ref_window: usize, ) -> Result<Vec<InsertionEmissionParams<'a>>> { let l = self.ins_seq.len() as usize; let start = self.locus().range().start as usize; let ref_seq = ref_buffer.seq(self.locus().contig())?; let ref_seq_len = ref_seq.len(); Ok(vec![InsertionEmissionParams { ref_seq, ref_offset: start.saturating_sub(ref_window), ref_end: cmp::min(start + l + ref_window, ref_seq_len), ins_start: start, ins_len: l, ins_end: start + l, ins_seq: Rc::clone(&self.ins_seq), read_emission: read_emission_params, }]) } } impl<R: Realigner> SamplingBias for Insertion<R> { fn feasible_bases(&self, read_len: u64, alignment_properties: &AlignmentProperties) -> u64 { if let Some(len) = self.enclosable_len() { if len < (alignment_properties.max_ins_cigar_len as u64) { return read_len; } } (read_len as f64 * alignment_properties.frac_max_softclip) as u64 } fn enclosable_len(&self) -> Option<u64> { Some(self.ins_seq.len() as u64) } } impl<R: Realigner> ReadSamplingBias for Insertion<R> {} impl<R: Realigner> Variant for Insertion<R> { type Evidence = PairedEndEvidence; type Loci = MultiLocus; fn is_valid_evidence( &self, evidence: &Self::Evidence, _: &AlignmentProperties, ) -> Option<Vec<usize>> { if match evidence { PairedEndEvidence::SingleEnd(read) =>!self.locus().overlap(read, true).is_none(), PairedEndEvidence::PairedEnd { left, right } => { !self.locus().overlap(left, true).is_none() ||!self.locus().overlap(right, true).is_none() } } { Some(vec![0]) } else { None } } /// Return variant loci. fn loci(&self) -> &Self::Loci {
/// Calculate probability for alt and reference allele. fn allele_support( &self, evidence: &Self::Evidence, _alignment_properties: &AlignmentProperties, ) -> Result<Option<AlleleSupport>> { match evidence { PairedEndEvidence::SingleEnd(record) => Ok(Some( self.realigner .borrow_mut() .allele_support(record, self.locus.iter(), self)?, )), PairedEndEvidence::PairedEnd { left, right } => { let left_support = self.realigner .borrow_mut() .allele_support(left, self.locus.iter(), self)?; let right_support = self.realigner .borrow_mut() .allele_support(right, self.locus.iter(), self)?; let mut support = left_support; support.merge(&right_support); Ok(Some(support)) } } } fn prob_sample_alt( &self, evidence: &Self::Evidence, alignment_properties: &AlignmentProperties, ) -> LogProb { match evidence { PairedEndEvidence::PairedEnd { left, right } => { // METHOD: we do not require the fragment to enclose the variant. // Hence, we treat both reads independently. (self .prob_sample_alt_read(left.seq().len() as u64, alignment_properties) .ln_one_minus_exp() + self .prob_sample_alt_read(right.seq().len() as u64, alignment_properties) .ln_one_minus_exp()) .ln_one_minus_exp() } PairedEndEvidence::SingleEnd(read) => { self.prob_sample_alt_read(read.seq().len() as u64, alignment_properties) } } } } /// Emission parameters for PairHMM over insertion allele. pub(crate) struct InsertionEmissionParams<'a> { ref_seq: Arc<Vec<u8>>, ref_offset: usize, ref_end: usize, ins_start: usize, ins_end: usize, ins_len: usize, ins_seq: Rc<Vec<u8>>, read_emission: Rc<ReadEmission<'a>>, } impl<'a> RefBaseEmission for InsertionEmissionParams<'a> { #[inline] fn ref_base(&self, i: usize) -> u8 { let i_ = i + self.ref_offset; if i_ <= self.ins_start { self.ref_seq[i_] } else if i_ > self.ins_end { self.ref_seq[i_ - self.ins_len] } else { self.ins_seq[i_ - (self.ins_start + 1)] } } default_ref_base_emission!(); } impl<'a> EmissionParameters for InsertionEmissionParams<'a> { default_emission!(); #[inline] fn len_x(&self) -> usize { self.ref_end - self.ref_offset + self.ins_len } }
&self.locus }
identifier_body
insertion.rs
// Copyright 2020 Johannes Köster. // Licensed under the GNU GPLv3 license (https://opensource.org/licenses/GPL-3.0) // This file may not be copied, modified, or distributed // except according to those terms. use std::cell::RefCell; use std::cmp; use std::rc::Rc; use std::sync::Arc; use anyhow::Result; use bio::stats::pairhmm::EmissionParameters; use bio::stats::LogProb; use bio_types::genome::{self, AbstractInterval, AbstractLocus}; use crate::estimation::alignment_properties::AlignmentProperties; use crate::reference; use crate::variants::evidence::realignment::pairhmm::{ReadEmission, RefBaseEmission}; use crate::variants::evidence::realignment::{Realignable, Realigner}; use crate::variants::sampling_bias::{ReadSamplingBias, SamplingBias}; use crate::variants::types::{AlleleSupport, MultiLocus, PairedEndEvidence, SingleLocus, Variant}; use crate::{default_emission, default_ref_base_emission}; pub(crate) struct Insertion<R: Realigner> { locus: MultiLocus, ins_seq: Rc<Vec<u8>>, realigner: RefCell<R>, } impl<R: Realigner> Insertion<R> { pub(crate) fn new(locus: genome::Locus, ins_seq: Vec<u8>, realigner: R) -> Self { Insertion { locus: MultiLocus::new(vec![SingleLocus::new(genome::Interval::new( locus.contig().to_owned(), locus.pos()..locus.pos() + 1, ))]), ins_seq: Rc::new(ins_seq), realigner: RefCell::new(realigner), } } pub(crate) fn locus(&self) -> &SingleLocus { &self.locus[0] } } impl<'a, R: Realigner> Realignable<'a> for Insertion<R> { type EmissionParams = InsertionEmissionParams<'a>; fn alt_emission_params( &self, read_emission_params: Rc<ReadEmission<'a>>, ref_buffer: Arc<reference::Buffer>, _: &genome::Interval, ref_window: usize, ) -> Result<Vec<InsertionEmissionParams<'a>>> { let l = self.ins_seq.len() as usize; let start = self.locus().range().start as usize; let ref_seq = ref_buffer.seq(self.locus().contig())?; let ref_seq_len = ref_seq.len(); Ok(vec![InsertionEmissionParams { ref_seq, ref_offset: start.saturating_sub(ref_window), ref_end: cmp::min(start + l + ref_window, ref_seq_len), ins_start: start, ins_len: l, ins_end: start + l, ins_seq: Rc::clone(&self.ins_seq), read_emission: read_emission_params, }]) } } impl<R: Realigner> SamplingBias for Insertion<R> { fn feasible_bases(&self, read_len: u64, alignment_properties: &AlignmentProperties) -> u64 { if let Some(len) = self.enclosable_len() { if len < (alignment_properties.max_ins_cigar_len as u64) { return read_len; } } (read_len as f64 * alignment_properties.frac_max_softclip) as u64 } fn enclosable_len(&self) -> Option<u64> { Some(self.ins_seq.len() as u64) } } impl<R: Realigner> ReadSamplingBias for Insertion<R> {} impl<R: Realigner> Variant for Insertion<R> { type Evidence = PairedEndEvidence; type Loci = MultiLocus; fn is_valid_evidence( &self, evidence: &Self::Evidence, _: &AlignmentProperties, ) -> Option<Vec<usize>> { if match evidence { PairedEndEvidence::SingleEnd(read) =>!self.locus().overlap(read, true).is_none(), PairedEndEvidence::PairedEnd { left, right } => { !self.locus().overlap(left, true).is_none() ||!self.locus().overlap(right, true).is_none() } } { Some(vec![0]) } else { None } } /// Return variant loci. fn loci(&self) -> &Self::Loci { &self.locus } /// Calculate probability for alt and reference allele. fn a
&self, evidence: &Self::Evidence, _alignment_properties: &AlignmentProperties, ) -> Result<Option<AlleleSupport>> { match evidence { PairedEndEvidence::SingleEnd(record) => Ok(Some( self.realigner .borrow_mut() .allele_support(record, self.locus.iter(), self)?, )), PairedEndEvidence::PairedEnd { left, right } => { let left_support = self.realigner .borrow_mut() .allele_support(left, self.locus.iter(), self)?; let right_support = self.realigner .borrow_mut() .allele_support(right, self.locus.iter(), self)?; let mut support = left_support; support.merge(&right_support); Ok(Some(support)) } } } fn prob_sample_alt( &self, evidence: &Self::Evidence, alignment_properties: &AlignmentProperties, ) -> LogProb { match evidence { PairedEndEvidence::PairedEnd { left, right } => { // METHOD: we do not require the fragment to enclose the variant. // Hence, we treat both reads independently. (self .prob_sample_alt_read(left.seq().len() as u64, alignment_properties) .ln_one_minus_exp() + self .prob_sample_alt_read(right.seq().len() as u64, alignment_properties) .ln_one_minus_exp()) .ln_one_minus_exp() } PairedEndEvidence::SingleEnd(read) => { self.prob_sample_alt_read(read.seq().len() as u64, alignment_properties) } } } } /// Emission parameters for PairHMM over insertion allele. pub(crate) struct InsertionEmissionParams<'a> { ref_seq: Arc<Vec<u8>>, ref_offset: usize, ref_end: usize, ins_start: usize, ins_end: usize, ins_len: usize, ins_seq: Rc<Vec<u8>>, read_emission: Rc<ReadEmission<'a>>, } impl<'a> RefBaseEmission for InsertionEmissionParams<'a> { #[inline] fn ref_base(&self, i: usize) -> u8 { let i_ = i + self.ref_offset; if i_ <= self.ins_start { self.ref_seq[i_] } else if i_ > self.ins_end { self.ref_seq[i_ - self.ins_len] } else { self.ins_seq[i_ - (self.ins_start + 1)] } } default_ref_base_emission!(); } impl<'a> EmissionParameters for InsertionEmissionParams<'a> { default_emission!(); #[inline] fn len_x(&self) -> usize { self.ref_end - self.ref_offset + self.ins_len } }
llele_support(
identifier_name
cargo_run.rs
use std::path::Path; use ops::{self, CompileFilter}; use util::{self, CargoResult, process, ProcessError}; use core::Package; pub fn run(manifest_path: &Path, options: &ops::CompileOptions, args: &[String]) -> CargoResult<Option<ProcessError>>
if bins.next().is_some() { match options.filter { CompileFilter::Everything => { bail!("`cargo run` requires that a project only have one \ executable; use the `--bin` option to specify which one \ to run") } CompileFilter::Only {.. } => { bail!("`cargo run` can run at most one executable, but \ multiple were specified") } } } let compile = try!(ops::compile(manifest_path, options)); let exe = &compile.binaries[0]; let exe = match util::without_prefix(&exe, config.cwd()) { Some(path) if path.file_name() == Some(path.as_os_str()) => Path::new(".").join(path).to_path_buf(), Some(path) => path.to_path_buf(), None => exe.to_path_buf(), }; let mut process = try!(compile.target_process(exe, &root)) .into_process_builder(); process.args(args).cwd(config.cwd()); try!(config.shell().status("Running", process.to_string())); Ok(process.exec().err()) }
{ let config = options.config; let root = try!(Package::for_path(manifest_path, config)); let mut bins = root.manifest().targets().iter().filter(|a| { !a.is_lib() && !a.is_custom_build() && match options.filter { CompileFilter::Everything => a.is_bin(), CompileFilter::Only { .. } => options.filter.matches(a), } }); if bins.next().is_none() { match options.filter { CompileFilter::Everything => { bail!("a bin target must be available for `cargo run`") } CompileFilter::Only { .. } => { // this will be verified in cargo_compile } } }
identifier_body
cargo_run.rs
use std::path::Path; use ops::{self, CompileFilter}; use util::{self, CargoResult, process, ProcessError}; use core::Package; pub fn run(manifest_path: &Path, options: &ops::CompileOptions, args: &[String]) -> CargoResult<Option<ProcessError>> { let config = options.config; let root = try!(Package::for_path(manifest_path, config)); let mut bins = root.manifest().targets().iter().filter(|a| { !a.is_lib() &&!a.is_custom_build() && match options.filter { CompileFilter::Everything => a.is_bin(), CompileFilter::Only {.. } => options.filter.matches(a), } }); if bins.next().is_none() { match options.filter { CompileFilter::Everything => { bail!("a bin target must be available for `cargo run`") } CompileFilter::Only {.. } => { // this will be verified in cargo_compile
if bins.next().is_some() { match options.filter { CompileFilter::Everything => { bail!("`cargo run` requires that a project only have one \ executable; use the `--bin` option to specify which one \ to run") } CompileFilter::Only {.. } => { bail!("`cargo run` can run at most one executable, but \ multiple were specified") } } } let compile = try!(ops::compile(manifest_path, options)); let exe = &compile.binaries[0]; let exe = match util::without_prefix(&exe, config.cwd()) { Some(path) if path.file_name() == Some(path.as_os_str()) => Path::new(".").join(path).to_path_buf(), Some(path) => path.to_path_buf(), None => exe.to_path_buf(), }; let mut process = try!(compile.target_process(exe, &root)) .into_process_builder(); process.args(args).cwd(config.cwd()); try!(config.shell().status("Running", process.to_string())); Ok(process.exec().err()) }
} } }
random_line_split
cargo_run.rs
use std::path::Path; use ops::{self, CompileFilter}; use util::{self, CargoResult, process, ProcessError}; use core::Package; pub fn
(manifest_path: &Path, options: &ops::CompileOptions, args: &[String]) -> CargoResult<Option<ProcessError>> { let config = options.config; let root = try!(Package::for_path(manifest_path, config)); let mut bins = root.manifest().targets().iter().filter(|a| { !a.is_lib() &&!a.is_custom_build() && match options.filter { CompileFilter::Everything => a.is_bin(), CompileFilter::Only {.. } => options.filter.matches(a), } }); if bins.next().is_none() { match options.filter { CompileFilter::Everything => { bail!("a bin target must be available for `cargo run`") } CompileFilter::Only {.. } => { // this will be verified in cargo_compile } } } if bins.next().is_some() { match options.filter { CompileFilter::Everything => { bail!("`cargo run` requires that a project only have one \ executable; use the `--bin` option to specify which one \ to run") } CompileFilter::Only {.. } => { bail!("`cargo run` can run at most one executable, but \ multiple were specified") } } } let compile = try!(ops::compile(manifest_path, options)); let exe = &compile.binaries[0]; let exe = match util::without_prefix(&exe, config.cwd()) { Some(path) if path.file_name() == Some(path.as_os_str()) => Path::new(".").join(path).to_path_buf(), Some(path) => path.to_path_buf(), None => exe.to_path_buf(), }; let mut process = try!(compile.target_process(exe, &root)) .into_process_builder(); process.args(args).cwd(config.cwd()); try!(config.shell().status("Running", process.to_string())); Ok(process.exec().err()) }
run
identifier_name
box.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Generic types for box properties. use values::animated::ToAnimatedZero; /// A generic value for the `vertical-align` property. #[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq)] #[derive(ToComputedValue, ToCss)] pub enum VerticalAlign<LengthOrPercentage> { /// `baseline` Baseline, /// `sub` Sub, /// `super` Super, /// `top` Top, /// `text-top` TextTop, /// `middle` Middle, /// `bottom` Bottom, /// `text-bottom` TextBottom, /// `-moz-middle-with-baseline` #[cfg(feature = "gecko")] MozMiddleWithBaseline, /// `<length-percentage>` Length(LengthOrPercentage), } impl<L> VerticalAlign<L> { /// Returns `baseline`. #[inline] pub fn baseline() -> Self {
} } impl<L> ToAnimatedZero for VerticalAlign<L> { fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// https://drafts.csswg.org/css-animations/#animation-iteration-count #[derive(Clone, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)] pub enum AnimationIterationCount<Number> { /// A `<number>` value. Number(Number), /// The `infinite` keyword. Infinite, } /// A generic value for the `perspective` property. #[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf)] #[derive(PartialEq, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss)] pub enum Perspective<NonNegativeLength> { /// A non-negative length. Length(NonNegativeLength), /// The keyword `none`. None, } impl<L> Perspective<L> { /// Returns `none`. #[inline] pub fn none() -> Self { Perspective::None } }
VerticalAlign::Baseline
random_line_split
box.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Generic types for box properties. use values::animated::ToAnimatedZero; /// A generic value for the `vertical-align` property. #[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq)] #[derive(ToComputedValue, ToCss)] pub enum VerticalAlign<LengthOrPercentage> { /// `baseline` Baseline, /// `sub` Sub, /// `super` Super, /// `top` Top, /// `text-top` TextTop, /// `middle` Middle, /// `bottom` Bottom, /// `text-bottom` TextBottom, /// `-moz-middle-with-baseline` #[cfg(feature = "gecko")] MozMiddleWithBaseline, /// `<length-percentage>` Length(LengthOrPercentage), } impl<L> VerticalAlign<L> { /// Returns `baseline`. #[inline] pub fn baseline() -> Self { VerticalAlign::Baseline } } impl<L> ToAnimatedZero for VerticalAlign<L> { fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// https://drafts.csswg.org/css-animations/#animation-iteration-count #[derive(Clone, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)] pub enum AnimationIterationCount<Number> { /// A `<number>` value. Number(Number), /// The `infinite` keyword. Infinite, } /// A generic value for the `perspective` property. #[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf)] #[derive(PartialEq, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss)] pub enum
<NonNegativeLength> { /// A non-negative length. Length(NonNegativeLength), /// The keyword `none`. None, } impl<L> Perspective<L> { /// Returns `none`. #[inline] pub fn none() -> Self { Perspective::None } }
Perspective
identifier_name
jit.rs
#![feature(asm)] use std::io::{prelude::*}; use std::env; use std::fs::File; use std::convert::TryInto; use brainfuck as bf; extern crate assembler; use ::assembler::{ ExecutableAnonymousMemoryMap, InstructionStream, InstructionStreamHints }; fn compile(expressions: &Vec<bf::Expression>, stream: &mut InstructionStream) { for expression in expressions { match expression { // 000000000000000c forward: // c: 48 81 c6 80 00 00 00 addq $128, %rsi &bf::Expression::MoveForward(n) => { stream.emit_bytes(b"\x48\x81\xc6"); stream.emit_double_word((n*4).try_into().unwrap()); }, // 0000000000000013 backward: // 13: 48 81 ee 80 00 00 00 subq $128, %rsi &bf::Expression::MoveBack(n) => { stream.emit_bytes(b"\x48\x81\xee"); stream.emit_double_word((n*4).try_into().unwrap()); }, // 0000000000000000 increment: // 0: 81 06 80 00 00 00 addl $128, (%rsi) &bf::Expression::IncValue(n) => { stream.emit_bytes(b"\x81\x06"); stream.emit_double_word(n); }, // 0000000000000006 decrement: // 6: 81 2e 80 00 00 00 subl $128, (%rsi) &bf::Expression::DecValue(n) => { stream.emit_bytes(b"\x81\x2e"); stream.emit_double_word(n); },
// 46: 0f 05 syscall bf::Expression::OutputValue => { stream.emit_bytes(b"\x48\xc7\xc0"); stream.emit_double_word(0x02000004); stream.emit_bytes(b"\x48\xc7\xc7\x01\x00\x00\x00"); stream.emit_bytes(b"\x48\xc7\xc2\x01\x00\x00\x00"); stream.emit_bytes(b"\x0f\x05"); }, // 000000000000001a read: // 1a: 48 c7 c0 03 00 00 02 movq $33554435, %rax // 21: 48 c7 c7 00 00 00 00 movq $0, %rdi // 28: 48 c7 c2 01 00 00 00 movq $1, %rdx // 2f: 0f 05 syscall bf::Expression::InputValue => { stream.emit_bytes(b"\x48\xc7\xc0"); stream.emit_double_word(0x02000003); stream.emit_bytes(b"\x48\xc7\xc7\x00\x00\x00\x00"); stream.emit_bytes(b"\x48\xc7\xc2\x01\x00\x00\x00"); stream.emit_bytes(b"\x0f\x05"); }, bf::Expression::Loop(sub_exp) => { let loop_start = stream.create_label(); let post_loop = stream.create_label(); // 48: 83 3e 00 cmpl $0, (%rsi) // 4b: 0f 84 00 00 00 00 je 0 <loop_end> stream.emit_bytes(b"\x83\x3e\x00"); stream.jz_Label_1(post_loop); // --* stream.attach_label(loop_start); // <-|-* // | | compile(sub_exp, stream); // | | // | | // 51: 83 3e 00 cmpl $0, (%rsi) // | | // 54: 0f 85 00 00 00 00 jne 0 // | | stream.emit_bytes(b"\x83\x3e\x00"); // | | stream.jnz_Label_1(loop_start); // --|-* stream.attach_label(post_loop); // <-* } } } } fn opcodes_size(stats: &bf::Stats) -> usize { return 7 * stats.fwd_count + 7 * stats.bwd_count + 6 * stats.inc_count + 6 * stats.dec_count + 23 * stats.output_count + 23 * stats.input_count + 9 * 2 * stats.loop_count; } fn run(expressions: &Vec<bf::Expression>) { let stats = bf::stats(expressions); let mem_size = opcodes_size(&stats) + 1 // retq + 8; // stream buffer let mut memory_map = ExecutableAnonymousMemoryMap::new(mem_size, false, false).unwrap(); let mut instruction_stream = memory_map.instruction_stream( &InstructionStreamHints { number_of_labels: stats.loop_count * 2, number_of_8_bit_jumps: 0, number_of_32_bit_jumps: stats.loop_count * 2, number_of_emitted_labels: 0 } ); // capture the address where the instructions start: let function_pointer = instruction_stream.nullary_function_pointer::<()>(); // transform all the expressions into opcodes: compile(expressions, &mut instruction_stream); // 000000000000005b finish: // 5b: c3 retq instruction_stream.emit_byte(0xc3); // resolve jumps and make memory executable: instruction_stream.finish(); // prepare working buffer: let memory = [0u32; 30000]; unsafe { // point RSI to the buffer: asm!( "movq {mem}, %rsi", mem = in (reg) &memory, options(att_syntax) ); // jump to the generated code: function_pointer(); } } fn main() { let args: Vec<String> = env::args().collect(); let filename = match args.get(1) { Some(s) => s, None => panic!("missing filename") }; let mut contents = String::new(); let mut file = File::open(filename).expect("Error opening file"); file.read_to_string(&mut contents).expect("Error reading file"); let tokens = bf::tokenize(&contents.chars().collect()); let expressions = bf::parse(&tokens).expect("Error compiling program"); let expressions = bf::optimize(&expressions); run(&expressions); }
// 0000000000000031 write: // 31: 48 c7 c0 04 00 00 02 movq $33554436, %rax // 38: 48 c7 c7 01 00 00 00 movq $1, %rdi // 3f: 48 c7 c2 01 00 00 00 movq $1, %rdx
random_line_split
jit.rs
#![feature(asm)] use std::io::{prelude::*}; use std::env; use std::fs::File; use std::convert::TryInto; use brainfuck as bf; extern crate assembler; use ::assembler::{ ExecutableAnonymousMemoryMap, InstructionStream, InstructionStreamHints }; fn compile(expressions: &Vec<bf::Expression>, stream: &mut InstructionStream) { for expression in expressions { match expression { // 000000000000000c forward: // c: 48 81 c6 80 00 00 00 addq $128, %rsi &bf::Expression::MoveForward(n) => { stream.emit_bytes(b"\x48\x81\xc6"); stream.emit_double_word((n*4).try_into().unwrap()); }, // 0000000000000013 backward: // 13: 48 81 ee 80 00 00 00 subq $128, %rsi &bf::Expression::MoveBack(n) => { stream.emit_bytes(b"\x48\x81\xee"); stream.emit_double_word((n*4).try_into().unwrap()); }, // 0000000000000000 increment: // 0: 81 06 80 00 00 00 addl $128, (%rsi) &bf::Expression::IncValue(n) => { stream.emit_bytes(b"\x81\x06"); stream.emit_double_word(n); }, // 0000000000000006 decrement: // 6: 81 2e 80 00 00 00 subl $128, (%rsi) &bf::Expression::DecValue(n) => { stream.emit_bytes(b"\x81\x2e"); stream.emit_double_word(n); }, // 0000000000000031 write: // 31: 48 c7 c0 04 00 00 02 movq $33554436, %rax // 38: 48 c7 c7 01 00 00 00 movq $1, %rdi // 3f: 48 c7 c2 01 00 00 00 movq $1, %rdx // 46: 0f 05 syscall bf::Expression::OutputValue => { stream.emit_bytes(b"\x48\xc7\xc0"); stream.emit_double_word(0x02000004); stream.emit_bytes(b"\x48\xc7\xc7\x01\x00\x00\x00"); stream.emit_bytes(b"\x48\xc7\xc2\x01\x00\x00\x00"); stream.emit_bytes(b"\x0f\x05"); }, // 000000000000001a read: // 1a: 48 c7 c0 03 00 00 02 movq $33554435, %rax // 21: 48 c7 c7 00 00 00 00 movq $0, %rdi // 28: 48 c7 c2 01 00 00 00 movq $1, %rdx // 2f: 0f 05 syscall bf::Expression::InputValue => { stream.emit_bytes(b"\x48\xc7\xc0"); stream.emit_double_word(0x02000003); stream.emit_bytes(b"\x48\xc7\xc7\x00\x00\x00\x00"); stream.emit_bytes(b"\x48\xc7\xc2\x01\x00\x00\x00"); stream.emit_bytes(b"\x0f\x05"); }, bf::Expression::Loop(sub_exp) => { let loop_start = stream.create_label(); let post_loop = stream.create_label(); // 48: 83 3e 00 cmpl $0, (%rsi) // 4b: 0f 84 00 00 00 00 je 0 <loop_end> stream.emit_bytes(b"\x83\x3e\x00"); stream.jz_Label_1(post_loop); // --* stream.attach_label(loop_start); // <-|-* // | | compile(sub_exp, stream); // | | // | | // 51: 83 3e 00 cmpl $0, (%rsi) // | | // 54: 0f 85 00 00 00 00 jne 0 // | | stream.emit_bytes(b"\x83\x3e\x00"); // | | stream.jnz_Label_1(loop_start); // --|-* stream.attach_label(post_loop); // <-* } } } } fn opcodes_size(stats: &bf::Stats) -> usize { return 7 * stats.fwd_count + 7 * stats.bwd_count + 6 * stats.inc_count + 6 * stats.dec_count + 23 * stats.output_count + 23 * stats.input_count + 9 * 2 * stats.loop_count; } fn run(expressions: &Vec<bf::Expression>) { let stats = bf::stats(expressions); let mem_size = opcodes_size(&stats) + 1 // retq + 8; // stream buffer let mut memory_map = ExecutableAnonymousMemoryMap::new(mem_size, false, false).unwrap(); let mut instruction_stream = memory_map.instruction_stream( &InstructionStreamHints { number_of_labels: stats.loop_count * 2, number_of_8_bit_jumps: 0, number_of_32_bit_jumps: stats.loop_count * 2, number_of_emitted_labels: 0 } ); // capture the address where the instructions start: let function_pointer = instruction_stream.nullary_function_pointer::<()>(); // transform all the expressions into opcodes: compile(expressions, &mut instruction_stream); // 000000000000005b finish: // 5b: c3 retq instruction_stream.emit_byte(0xc3); // resolve jumps and make memory 
executable: instruction_stream.finish(); // prepare working buffer: let memory = [0u32; 30000]; unsafe { // point RSI to the buffer: asm!( "movq {mem}, %rsi", mem = in (reg) &memory, options(att_syntax) ); // jump to the generated code: function_pointer(); } } fn
() { let args: Vec<String> = env::args().collect(); let filename = match args.get(1) { Some(s) => s, None => panic!("missing filename") }; let mut contents = String::new(); let mut file = File::open(filename).expect("Error opening file"); file.read_to_string(&mut contents).expect("Error reading file"); let tokens = bf::tokenize(&contents.chars().collect()); let expressions = bf::parse(&tokens).expect("Error compiling program"); let expressions = bf::optimize(&expressions); run(&expressions); }
main
identifier_name
jit.rs
#![feature(asm)] use std::io::{prelude::*}; use std::env; use std::fs::File; use std::convert::TryInto; use brainfuck as bf; extern crate assembler; use ::assembler::{ ExecutableAnonymousMemoryMap, InstructionStream, InstructionStreamHints }; fn compile(expressions: &Vec<bf::Expression>, stream: &mut InstructionStream) { for expression in expressions { match expression { // 000000000000000c forward: // c: 48 81 c6 80 00 00 00 addq $128, %rsi &bf::Expression::MoveForward(n) => { stream.emit_bytes(b"\x48\x81\xc6"); stream.emit_double_word((n*4).try_into().unwrap()); }, // 0000000000000013 backward: // 13: 48 81 ee 80 00 00 00 subq $128, %rsi &bf::Expression::MoveBack(n) => { stream.emit_bytes(b"\x48\x81\xee"); stream.emit_double_word((n*4).try_into().unwrap()); }, // 0000000000000000 increment: // 0: 81 06 80 00 00 00 addl $128, (%rsi) &bf::Expression::IncValue(n) => { stream.emit_bytes(b"\x81\x06"); stream.emit_double_word(n); }, // 0000000000000006 decrement: // 6: 81 2e 80 00 00 00 subl $128, (%rsi) &bf::Expression::DecValue(n) => { stream.emit_bytes(b"\x81\x2e"); stream.emit_double_word(n); }, // 0000000000000031 write: // 31: 48 c7 c0 04 00 00 02 movq $33554436, %rax // 38: 48 c7 c7 01 00 00 00 movq $1, %rdi // 3f: 48 c7 c2 01 00 00 00 movq $1, %rdx // 46: 0f 05 syscall bf::Expression::OutputValue => { stream.emit_bytes(b"\x48\xc7\xc0"); stream.emit_double_word(0x02000004); stream.emit_bytes(b"\x48\xc7\xc7\x01\x00\x00\x00"); stream.emit_bytes(b"\x48\xc7\xc2\x01\x00\x00\x00"); stream.emit_bytes(b"\x0f\x05"); }, // 000000000000001a read: // 1a: 48 c7 c0 03 00 00 02 movq $33554435, %rax // 21: 48 c7 c7 00 00 00 00 movq $0, %rdi // 28: 48 c7 c2 01 00 00 00 movq $1, %rdx // 2f: 0f 05 syscall bf::Expression::InputValue => { stream.emit_bytes(b"\x48\xc7\xc0"); stream.emit_double_word(0x02000003); stream.emit_bytes(b"\x48\xc7\xc7\x00\x00\x00\x00"); stream.emit_bytes(b"\x48\xc7\xc2\x01\x00\x00\x00"); stream.emit_bytes(b"\x0f\x05"); }, bf::Expression::Loop(sub_exp) => { let loop_start = stream.create_label(); let post_loop = stream.create_label(); // 48: 83 3e 00 cmpl $0, (%rsi) // 4b: 0f 84 00 00 00 00 je 0 <loop_end> stream.emit_bytes(b"\x83\x3e\x00"); stream.jz_Label_1(post_loop); // --* stream.attach_label(loop_start); // <-|-* // | | compile(sub_exp, stream); // | | // | | // 51: 83 3e 00 cmpl $0, (%rsi) // | | // 54: 0f 85 00 00 00 00 jne 0 // | | stream.emit_bytes(b"\x83\x3e\x00"); // | | stream.jnz_Label_1(loop_start); // --|-* stream.attach_label(post_loop); // <-* } } } } fn opcodes_size(stats: &bf::Stats) -> usize { return 7 * stats.fwd_count + 7 * stats.bwd_count + 6 * stats.inc_count + 6 * stats.dec_count + 23 * stats.output_count + 23 * stats.input_count + 9 * 2 * stats.loop_count; } fn run(expressions: &Vec<bf::Expression>) { let stats = bf::stats(expressions); let mem_size = opcodes_size(&stats) + 1 // retq + 8; // stream buffer let mut memory_map = ExecutableAnonymousMemoryMap::new(mem_size, false, false).unwrap(); let mut instruction_stream = memory_map.instruction_stream( &InstructionStreamHints { number_of_labels: stats.loop_count * 2, number_of_8_bit_jumps: 0, number_of_32_bit_jumps: stats.loop_count * 2, number_of_emitted_labels: 0 } ); // capture the address where the instructions start: let function_pointer = instruction_stream.nullary_function_pointer::<()>(); // transform all the expressions into opcodes: compile(expressions, &mut instruction_stream); // 000000000000005b finish: // 5b: c3 retq instruction_stream.emit_byte(0xc3); // resolve jumps and make memory 
executable: instruction_stream.finish(); // prepare working buffer: let memory = [0u32; 30000]; unsafe { // point RSI to the buffer: asm!( "movq {mem}, %rsi", mem = in (reg) &memory, options(att_syntax) ); // jump to the generated code: function_pointer(); } } fn main()
{ let args: Vec<String> = env::args().collect(); let filename = match args.get(1) { Some(s) => s, None => panic!("missing filename") }; let mut contents = String::new(); let mut file = File::open(filename).expect("Error opening file"); file.read_to_string(&mut contents).expect("Error reading file"); let tokens = bf::tokenize(&contents.chars().collect()); let expressions = bf::parse(&tokens).expect("Error compiling program"); let expressions = bf::optimize(&expressions); run(&expressions); }
identifier_body
selectors.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::Parser; use selectors::parser::{Selector, ParserContext, parse_selector_list}; use style::selector_impl::TheSelectorImpl; fn parse(input: &mut Parser) -> Result<Selector<TheSelectorImpl>, ()> { let mut context = ParserContext::new(); context.in_user_agent_stylesheet = true; context.namespace_prefixes.insert("svg".into(), ns!(svg)); parse_selector_list(&context, input).map(|mut vec| vec.pop().unwrap()) } #[test] fn test_selectors() { assert_roundtrip!(parse, "div"); assert_roundtrip!(parse, "svg|circle"); assert_roundtrip!(parse, "p:before", "p::before"); assert_roundtrip!(parse, "[border = \"0\"]:-servo-nonzero-border ~ ::-servo-details-summary");
}
random_line_split
selectors.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::Parser; use selectors::parser::{Selector, ParserContext, parse_selector_list}; use style::selector_impl::TheSelectorImpl; fn parse(input: &mut Parser) -> Result<Selector<TheSelectorImpl>, ()>
#[test] fn test_selectors() { assert_roundtrip!(parse, "div"); assert_roundtrip!(parse, "svg|circle"); assert_roundtrip!(parse, "p:before", "p::before"); assert_roundtrip!(parse, "[border = \"0\"]:-servo-nonzero-border ~ ::-servo-details-summary"); }
{ let mut context = ParserContext::new(); context.in_user_agent_stylesheet = true; context.namespace_prefixes.insert("svg".into(), ns!(svg)); parse_selector_list(&context, input).map(|mut vec| vec.pop().unwrap()) }
identifier_body
selectors.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::Parser; use selectors::parser::{Selector, ParserContext, parse_selector_list}; use style::selector_impl::TheSelectorImpl; fn parse(input: &mut Parser) -> Result<Selector<TheSelectorImpl>, ()> { let mut context = ParserContext::new(); context.in_user_agent_stylesheet = true; context.namespace_prefixes.insert("svg".into(), ns!(svg)); parse_selector_list(&context, input).map(|mut vec| vec.pop().unwrap()) } #[test] fn
() { assert_roundtrip!(parse, "div"); assert_roundtrip!(parse, "svg|circle"); assert_roundtrip!(parse, "p:before", "p::before"); assert_roundtrip!(parse, "[border = \"0\"]:-servo-nonzero-border ~ ::-servo-details-summary"); }
test_selectors
identifier_name
webdriver_msg.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use constellation_msg::PipelineId;
use rustc_serialize::json::{Json, ToJson}; use url::Url; #[derive(Deserialize, Serialize)] pub enum WebDriverScriptCommand { ExecuteScript(String, IpcSender<WebDriverJSResult>), ExecuteAsyncScript(String, IpcSender<WebDriverJSResult>), FindElementCSS(String, IpcSender<Result<Option<String>, ()>>), FindElementsCSS(String, IpcSender<Result<Vec<String>, ()>>), FocusElement(String, IpcSender<Result<(), ()>>), GetActiveElement(IpcSender<Option<String>>), GetElementTagName(String, IpcSender<Result<String, ()>>), GetElementText(String, IpcSender<Result<String, ()>>), GetFrameId(WebDriverFrameId, IpcSender<Result<Option<PipelineId>, ()>>), GetUrl(IpcSender<Url>), GetTitle(IpcSender<String>) } #[derive(Deserialize, Serialize)] pub enum WebDriverJSValue { Undefined, Null, Boolean(bool), Number(f64), String(String), // TODO: Object and WebElement } #[derive(Deserialize, Serialize)] pub enum WebDriverJSError { Timeout, UnknownType } pub type WebDriverJSResult = Result<WebDriverJSValue, WebDriverJSError>; #[derive(Deserialize, Serialize)] pub enum WebDriverFrameId { Short(u16), Element(String), Parent } impl ToJson for WebDriverJSValue { fn to_json(&self) -> Json { match *self { WebDriverJSValue::Undefined => Json::Null, WebDriverJSValue::Null => Json::Null, WebDriverJSValue::Boolean(ref x) => x.to_json(), WebDriverJSValue::Number(ref x) => x.to_json(), WebDriverJSValue::String(ref x) => x.to_json() } } } #[derive(Deserialize, Serialize)] pub enum LoadStatus { LoadComplete, LoadTimeout }
use ipc_channel::ipc::IpcSender;
random_line_split
webdriver_msg.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use constellation_msg::PipelineId; use ipc_channel::ipc::IpcSender; use rustc_serialize::json::{Json, ToJson}; use url::Url; #[derive(Deserialize, Serialize)] pub enum
{ ExecuteScript(String, IpcSender<WebDriverJSResult>), ExecuteAsyncScript(String, IpcSender<WebDriverJSResult>), FindElementCSS(String, IpcSender<Result<Option<String>, ()>>), FindElementsCSS(String, IpcSender<Result<Vec<String>, ()>>), FocusElement(String, IpcSender<Result<(), ()>>), GetActiveElement(IpcSender<Option<String>>), GetElementTagName(String, IpcSender<Result<String, ()>>), GetElementText(String, IpcSender<Result<String, ()>>), GetFrameId(WebDriverFrameId, IpcSender<Result<Option<PipelineId>, ()>>), GetUrl(IpcSender<Url>), GetTitle(IpcSender<String>) } #[derive(Deserialize, Serialize)] pub enum WebDriverJSValue { Undefined, Null, Boolean(bool), Number(f64), String(String), // TODO: Object and WebElement } #[derive(Deserialize, Serialize)] pub enum WebDriverJSError { Timeout, UnknownType } pub type WebDriverJSResult = Result<WebDriverJSValue, WebDriverJSError>; #[derive(Deserialize, Serialize)] pub enum WebDriverFrameId { Short(u16), Element(String), Parent } impl ToJson for WebDriverJSValue { fn to_json(&self) -> Json { match *self { WebDriverJSValue::Undefined => Json::Null, WebDriverJSValue::Null => Json::Null, WebDriverJSValue::Boolean(ref x) => x.to_json(), WebDriverJSValue::Number(ref x) => x.to_json(), WebDriverJSValue::String(ref x) => x.to_json() } } } #[derive(Deserialize, Serialize)] pub enum LoadStatus { LoadComplete, LoadTimeout }
WebDriverScriptCommand
identifier_name
crateresolve4b-2.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:crateresolve4a-1.rs // aux-build:crateresolve4a-2.rs #[link(name = "crateresolve4b", vers = "0.2")]; #[crate_type = "lib"]; extern mod crateresolve4a(vers="0.1"); pub fn g() -> int
{ crateresolve4a::f() }
identifier_body
crateresolve4b-2.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:crateresolve4a-1.rs // aux-build:crateresolve4a-2.rs #[link(name = "crateresolve4b", vers = "0.2")]; #[crate_type = "lib"]; extern mod crateresolve4a(vers="0.1"); pub fn
() -> int { crateresolve4a::f() }
g
identifier_name
mod.rs
//! The compiler code necessary to implement the `#[derive]` extensions. use rustc_ast as ast; use rustc_ast::ptr::P; use rustc_ast::{Impl, ItemKind, MetaItem}; use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, MultiItemModifier}; use rustc_span::symbol::{sym, Ident, Symbol}; use rustc_span::Span; macro path_local($x:ident) { generic::ty::Path::new_local(sym::$x) } macro pathvec_std($($rest:ident)::+) {{ vec![ $( sym::$rest ),+ ] }} macro path_std($($x:tt)*) { generic::ty::Path::new( pathvec_std!( $($x)* ) ) } pub mod bounds; pub mod clone; pub mod debug; pub mod decodable; pub mod default; pub mod encodable; pub mod hash; #[path = "cmp/eq.rs"] pub mod eq; #[path = "cmp/ord.rs"] pub mod ord; #[path = "cmp/partial_eq.rs"] pub mod partial_eq; #[path = "cmp/partial_ord.rs"] pub mod partial_ord; pub mod generic; crate struct BuiltinDerive( crate fn(&mut ExtCtxt<'_>, Span, &MetaItem, &Annotatable, &mut dyn FnMut(Annotatable)), ); impl MultiItemModifier for BuiltinDerive { fn expand( &self, ecx: &mut ExtCtxt<'_>, span: Span, meta_item: &MetaItem, item: Annotatable, ) -> ExpandResult<Vec<Annotatable>, Annotatable> { // FIXME: Built-in derives often forget to give spans contexts, // so we are doing it here in a centralized way. let span = ecx.with_def_site_ctxt(span); let mut items = Vec::new(); match item { Annotatable::Stmt(stmt) =>
_ => { (self.0)(ecx, span, meta_item, &item, &mut |a| items.push(a)); } } ExpandResult::Ready(items) } } /// Constructs an expression that calls an intrinsic fn call_intrinsic( cx: &ExtCtxt<'_>, span: Span, intrinsic: Symbol, args: Vec<P<ast::Expr>>, ) -> P<ast::Expr> { let span = cx.with_def_site_ctxt(span); let path = cx.std_path(&[sym::intrinsics, intrinsic]); cx.expr_call_global(span, path, args) } /// Constructs an expression that calls the `unreachable` intrinsic. fn call_unreachable(cx: &ExtCtxt<'_>, span: Span) -> P<ast::Expr> { let span = cx.with_def_site_ctxt(span); let path = cx.std_path(&[sym::intrinsics, sym::unreachable]); let call = cx.expr_call_global(span, path, vec![]); cx.expr_block(P(ast::Block { stmts: vec![cx.stmt_expr(call)], id: ast::DUMMY_NODE_ID, rules: ast::BlockCheckMode::Unsafe(ast::CompilerGenerated), span, tokens: None, could_be_bare_literal: false, })) } // Injects `impl<...> Structural for ItemType<...> { }`. In particular, // does *not* add `where T: Structural` for parameters `T` in `...`. // (That's the main reason we cannot use TraitDef here.) fn inject_impl_of_structural_trait( cx: &mut ExtCtxt<'_>, span: Span, item: &Annotatable, structural_path: generic::ty::Path, push: &mut dyn FnMut(Annotatable), ) { let item = match *item { Annotatable::Item(ref item) => item, _ => unreachable!(), }; let generics = match item.kind { ItemKind::Struct(_, ref generics) | ItemKind::Enum(_, ref generics) => generics, // Do not inject `impl Structural for Union`. (`PartialEq` does not // support unions, so we will see error downstream.) ItemKind::Union(..) => return, _ => unreachable!(), }; // Create generics param list for where clauses and impl headers let mut generics = generics.clone(); // Create the type of `self`. // // in addition, remove defaults from type params (impls cannot have them). let self_params: Vec<_> = generics .params .iter_mut() .map(|param| match &mut param.kind { ast::GenericParamKind::Lifetime => { ast::GenericArg::Lifetime(cx.lifetime(span, param.ident)) } ast::GenericParamKind::Type { default } => { *default = None; ast::GenericArg::Type(cx.ty_ident(span, param.ident)) } ast::GenericParamKind::Const { ty: _, kw_span: _, default } => { *default = None; ast::GenericArg::Const(cx.const_ident(span, param.ident)) } }) .collect(); let type_ident = item.ident; let trait_ref = cx.trait_ref(structural_path.to_path(cx, span, type_ident, &generics)); let self_type = cx.ty_path(cx.path_all(span, false, vec![type_ident], self_params)); // It would be nice to also encode constraint `where Self: Eq` (by adding it // onto `generics` cloned above). Unfortunately, that strategy runs afoul of // rust-lang/rust#48214. So we perform that additional check in the compiler // itself, instead of encoding it here. // Keep the lint and stability attributes of the original item, to control // how the generated implementation is linted. let mut attrs = Vec::new(); attrs.extend( item.attrs .iter() .filter(|a| { [sym::allow, sym::warn, sym::deny, sym::forbid, sym::stable, sym::unstable] .contains(&a.name_or_empty()) }) .cloned(), ); let newitem = cx.item( span, Ident::empty(), attrs, ItemKind::Impl(Box::new(Impl { unsafety: ast::Unsafe::No, polarity: ast::ImplPolarity::Positive, defaultness: ast::Defaultness::Final, constness: ast::Const::No, generics, of_trait: Some(trait_ref), self_ty: self_type, items: Vec::new(), })), ); push(Annotatable::Item(newitem)); }
{ if let ast::StmtKind::Item(item) = stmt.into_inner().kind { (self.0)(ecx, span, meta_item, &Annotatable::Item(item), &mut |a| { // Cannot use 'ecx.stmt_item' here, because we need to pass 'ecx' // to the function items.push(Annotatable::Stmt(P(ast::Stmt { id: ast::DUMMY_NODE_ID, kind: ast::StmtKind::Item(a.expect_item()), span, }))); }); } else { unreachable!("should have already errored on non-item statement") } }
conditional_block
mod.rs
//! The compiler code necessary to implement the `#[derive]` extensions. use rustc_ast as ast; use rustc_ast::ptr::P; use rustc_ast::{Impl, ItemKind, MetaItem}; use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, MultiItemModifier}; use rustc_span::symbol::{sym, Ident, Symbol}; use rustc_span::Span; macro path_local($x:ident) { generic::ty::Path::new_local(sym::$x) }
macro pathvec_std($($rest:ident)::+) {{ vec![ $( sym::$rest ),+ ] }} macro path_std($($x:tt)*) { generic::ty::Path::new( pathvec_std!( $($x)* ) ) } pub mod bounds; pub mod clone; pub mod debug; pub mod decodable; pub mod default; pub mod encodable; pub mod hash; #[path = "cmp/eq.rs"] pub mod eq; #[path = "cmp/ord.rs"] pub mod ord; #[path = "cmp/partial_eq.rs"] pub mod partial_eq; #[path = "cmp/partial_ord.rs"] pub mod partial_ord; pub mod generic; crate struct BuiltinDerive( crate fn(&mut ExtCtxt<'_>, Span, &MetaItem, &Annotatable, &mut dyn FnMut(Annotatable)), ); impl MultiItemModifier for BuiltinDerive { fn expand( &self, ecx: &mut ExtCtxt<'_>, span: Span, meta_item: &MetaItem, item: Annotatable, ) -> ExpandResult<Vec<Annotatable>, Annotatable> { // FIXME: Built-in derives often forget to give spans contexts, // so we are doing it here in a centralized way. let span = ecx.with_def_site_ctxt(span); let mut items = Vec::new(); match item { Annotatable::Stmt(stmt) => { if let ast::StmtKind::Item(item) = stmt.into_inner().kind { (self.0)(ecx, span, meta_item, &Annotatable::Item(item), &mut |a| { // Cannot use 'ecx.stmt_item' here, because we need to pass 'ecx' // to the function items.push(Annotatable::Stmt(P(ast::Stmt { id: ast::DUMMY_NODE_ID, kind: ast::StmtKind::Item(a.expect_item()), span, }))); }); } else { unreachable!("should have already errored on non-item statement") } } _ => { (self.0)(ecx, span, meta_item, &item, &mut |a| items.push(a)); } } ExpandResult::Ready(items) } } /// Constructs an expression that calls an intrinsic fn call_intrinsic( cx: &ExtCtxt<'_>, span: Span, intrinsic: Symbol, args: Vec<P<ast::Expr>>, ) -> P<ast::Expr> { let span = cx.with_def_site_ctxt(span); let path = cx.std_path(&[sym::intrinsics, intrinsic]); cx.expr_call_global(span, path, args) } /// Constructs an expression that calls the `unreachable` intrinsic. fn call_unreachable(cx: &ExtCtxt<'_>, span: Span) -> P<ast::Expr> { let span = cx.with_def_site_ctxt(span); let path = cx.std_path(&[sym::intrinsics, sym::unreachable]); let call = cx.expr_call_global(span, path, vec![]); cx.expr_block(P(ast::Block { stmts: vec![cx.stmt_expr(call)], id: ast::DUMMY_NODE_ID, rules: ast::BlockCheckMode::Unsafe(ast::CompilerGenerated), span, tokens: None, could_be_bare_literal: false, })) } // Injects `impl<...> Structural for ItemType<...> { }`. In particular, // does *not* add `where T: Structural` for parameters `T` in `...`. // (That's the main reason we cannot use TraitDef here.) fn inject_impl_of_structural_trait( cx: &mut ExtCtxt<'_>, span: Span, item: &Annotatable, structural_path: generic::ty::Path, push: &mut dyn FnMut(Annotatable), ) { let item = match *item { Annotatable::Item(ref item) => item, _ => unreachable!(), }; let generics = match item.kind { ItemKind::Struct(_, ref generics) | ItemKind::Enum(_, ref generics) => generics, // Do not inject `impl Structural for Union`. (`PartialEq` does not // support unions, so we will see error downstream.) ItemKind::Union(..) => return, _ => unreachable!(), }; // Create generics param list for where clauses and impl headers let mut generics = generics.clone(); // Create the type of `self`. // // in addition, remove defaults from type params (impls cannot have them). 
let self_params: Vec<_> = generics .params .iter_mut() .map(|param| match &mut param.kind { ast::GenericParamKind::Lifetime => { ast::GenericArg::Lifetime(cx.lifetime(span, param.ident)) } ast::GenericParamKind::Type { default } => { *default = None; ast::GenericArg::Type(cx.ty_ident(span, param.ident)) } ast::GenericParamKind::Const { ty: _, kw_span: _, default } => { *default = None; ast::GenericArg::Const(cx.const_ident(span, param.ident)) } }) .collect(); let type_ident = item.ident; let trait_ref = cx.trait_ref(structural_path.to_path(cx, span, type_ident, &generics)); let self_type = cx.ty_path(cx.path_all(span, false, vec![type_ident], self_params)); // It would be nice to also encode constraint `where Self: Eq` (by adding it // onto `generics` cloned above). Unfortunately, that strategy runs afoul of // rust-lang/rust#48214. So we perform that additional check in the compiler // itself, instead of encoding it here. // Keep the lint and stability attributes of the original item, to control // how the generated implementation is linted. let mut attrs = Vec::new(); attrs.extend( item.attrs .iter() .filter(|a| { [sym::allow, sym::warn, sym::deny, sym::forbid, sym::stable, sym::unstable] .contains(&a.name_or_empty()) }) .cloned(), ); let newitem = cx.item( span, Ident::empty(), attrs, ItemKind::Impl(Box::new(Impl { unsafety: ast::Unsafe::No, polarity: ast::ImplPolarity::Positive, defaultness: ast::Defaultness::Final, constness: ast::Const::No, generics, of_trait: Some(trait_ref), self_ty: self_type, items: Vec::new(), })), ); push(Annotatable::Item(newitem)); }
random_line_split
structinfo.rs
// GObject Introspection Rust bindings. // Copyright (C) 2014 Luis Araujo <[email protected]> // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA extern crate libc; use structinfo::libc::{c_char, c_int, size_t}; use glib_gobject::GBoolean; use types::{GIStructInfo, GIFunctionInfo, GIFieldInfo}; use std::mem::transmute; #[link(name = "girepository-1.0")] extern "C" { fn g_struct_info_get_n_fields(info: *GIStructInfo) -> c_int; fn g_struct_info_get_field(info: *GIStructInfo, n: c_int) -> *GIFieldInfo; fn g_struct_info_get_n_methods(info: *GIStructInfo) -> c_int; fn g_struct_info_get_method(info: *GIStructInfo, n: c_int) -> *GIFunctionInfo; fn g_struct_info_find_method(info: *GIStructInfo, name: *c_char) -> *GIFunctionInfo; fn g_struct_info_get_size(info: *GIStructInfo) -> size_t; fn g_struct_info_get_alignment(info: *GIStructInfo) -> size_t; fn g_struct_info_is_gtype_struct(info: *GIStructInfo) -> GBoolean; fn g_struct_info_is_foreign(info: *GIStructInfo) -> GBoolean; } /// Obtain the number of fields this structure has. pub fn get_n_fields(info: *GIStructInfo) -> int { unsafe { g_struct_info_get_n_fields(info) as int } } /// Obtain the type information for field with specified index.
unsafe { g_struct_info_get_field(info, n as c_int) } } /// Obtain the number of methods this structure has. pub fn get_n_methods(info: *GIStructInfo) -> int { unsafe { g_struct_info_get_n_methods(info) as int } } /// Obtain the type information for method with specified index. pub fn get_method(info: *GIStructInfo, n: int) -> *GIFunctionInfo { unsafe { g_struct_info_get_method(info, n as c_int) } } /// Obtain the type information for method named name. pub fn find_method(info: *GIStructInfo, name: &str) -> *GIFunctionInfo { name.with_c_str(|c_name| unsafe { g_struct_info_find_method(info, c_name) }) } /// Obtain the total size of the structure. pub fn get_size(info: *GIStructInfo) -> size_t { unsafe { g_struct_info_get_size(info) } } /// Obtain the required alignment of the structure. pub fn get_alignment(info: *GIStructInfo) -> size_t { unsafe { g_struct_info_get_alignment(info) } } /// Return true if this structure represents the "class structure" for some /// GObject or GInterface. pub fn is_gtype_struct(info: *GIStructInfo) -> GBoolean { unsafe { g_struct_info_is_gtype_struct(info) } } pub fn is_foreign(info: *GIStructInfo) -> GBoolean { unsafe { g_struct_info_is_foreign(info) } } /// Convert GIBaseInfo to GIStructInfo. pub fn to_gi_struct_info<T>(object: *T) -> *GIStructInfo { unsafe { transmute(object) } }
pub fn get_field(info: *GIStructInfo, n: int) -> *GIFieldInfo {
random_line_split
structinfo.rs
// GObject Introspection Rust bindings. // Copyright (C) 2014 Luis Araujo <[email protected]> // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA extern crate libc; use structinfo::libc::{c_char, c_int, size_t}; use glib_gobject::GBoolean; use types::{GIStructInfo, GIFunctionInfo, GIFieldInfo}; use std::mem::transmute; #[link(name = "girepository-1.0")] extern "C" { fn g_struct_info_get_n_fields(info: *GIStructInfo) -> c_int; fn g_struct_info_get_field(info: *GIStructInfo, n: c_int) -> *GIFieldInfo; fn g_struct_info_get_n_methods(info: *GIStructInfo) -> c_int; fn g_struct_info_get_method(info: *GIStructInfo, n: c_int) -> *GIFunctionInfo; fn g_struct_info_find_method(info: *GIStructInfo, name: *c_char) -> *GIFunctionInfo; fn g_struct_info_get_size(info: *GIStructInfo) -> size_t; fn g_struct_info_get_alignment(info: *GIStructInfo) -> size_t; fn g_struct_info_is_gtype_struct(info: *GIStructInfo) -> GBoolean; fn g_struct_info_is_foreign(info: *GIStructInfo) -> GBoolean; } /// Obtain the number of fields this structure has. pub fn get_n_fields(info: *GIStructInfo) -> int { unsafe { g_struct_info_get_n_fields(info) as int } } /// Obtain the type information for field with specified index. pub fn get_field(info: *GIStructInfo, n: int) -> *GIFieldInfo { unsafe { g_struct_info_get_field(info, n as c_int) } } /// Obtain the number of methods this structure has. pub fn get_n_methods(info: *GIStructInfo) -> int { unsafe { g_struct_info_get_n_methods(info) as int } } /// Obtain the type information for method with specified index. pub fn get_method(info: *GIStructInfo, n: int) -> *GIFunctionInfo { unsafe { g_struct_info_get_method(info, n as c_int) } } /// Obtain the type information for method named name. pub fn find_method(info: *GIStructInfo, name: &str) -> *GIFunctionInfo { name.with_c_str(|c_name| unsafe { g_struct_info_find_method(info, c_name) }) } /// Obtain the total size of the structure. pub fn get_size(info: *GIStructInfo) -> size_t
/// Obtain the required alignment of the structure. pub fn get_alignment(info: *GIStructInfo) -> size_t { unsafe { g_struct_info_get_alignment(info) } } /// Return true if this structure represents the "class structure" for some /// GObject or GInterface. pub fn is_gtype_struct(info: *GIStructInfo) -> GBoolean { unsafe { g_struct_info_is_gtype_struct(info) } } pub fn is_foreign(info: *GIStructInfo) -> GBoolean { unsafe { g_struct_info_is_foreign(info) } } /// Convert GIBaseInfo to GIStructInfo. pub fn to_gi_struct_info<T>(object: *T) -> *GIStructInfo { unsafe { transmute(object) } }
{ unsafe { g_struct_info_get_size(info) } }
identifier_body
structinfo.rs
// GObject Introspection Rust bindings. // Copyright (C) 2014 Luis Araujo <[email protected]> // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA extern crate libc; use structinfo::libc::{c_char, c_int, size_t}; use glib_gobject::GBoolean; use types::{GIStructInfo, GIFunctionInfo, GIFieldInfo}; use std::mem::transmute; #[link(name = "girepository-1.0")] extern "C" { fn g_struct_info_get_n_fields(info: *GIStructInfo) -> c_int; fn g_struct_info_get_field(info: *GIStructInfo, n: c_int) -> *GIFieldInfo; fn g_struct_info_get_n_methods(info: *GIStructInfo) -> c_int; fn g_struct_info_get_method(info: *GIStructInfo, n: c_int) -> *GIFunctionInfo; fn g_struct_info_find_method(info: *GIStructInfo, name: *c_char) -> *GIFunctionInfo; fn g_struct_info_get_size(info: *GIStructInfo) -> size_t; fn g_struct_info_get_alignment(info: *GIStructInfo) -> size_t; fn g_struct_info_is_gtype_struct(info: *GIStructInfo) -> GBoolean; fn g_struct_info_is_foreign(info: *GIStructInfo) -> GBoolean; } /// Obtain the number of fields this structure has. pub fn get_n_fields(info: *GIStructInfo) -> int { unsafe { g_struct_info_get_n_fields(info) as int } } /// Obtain the type information for field with specified index. pub fn get_field(info: *GIStructInfo, n: int) -> *GIFieldInfo { unsafe { g_struct_info_get_field(info, n as c_int) } } /// Obtain the number of methods this structure has. pub fn get_n_methods(info: *GIStructInfo) -> int { unsafe { g_struct_info_get_n_methods(info) as int } } /// Obtain the type information for method with specified index. pub fn get_method(info: *GIStructInfo, n: int) -> *GIFunctionInfo { unsafe { g_struct_info_get_method(info, n as c_int) } } /// Obtain the type information for method named name. pub fn find_method(info: *GIStructInfo, name: &str) -> *GIFunctionInfo { name.with_c_str(|c_name| unsafe { g_struct_info_find_method(info, c_name) }) } /// Obtain the total size of the structure. pub fn get_size(info: *GIStructInfo) -> size_t { unsafe { g_struct_info_get_size(info) } } /// Obtain the required alignment of the structure. pub fn get_alignment(info: *GIStructInfo) -> size_t { unsafe { g_struct_info_get_alignment(info) } } /// Return true if this structure represents the "class structure" for some /// GObject or GInterface. pub fn is_gtype_struct(info: *GIStructInfo) -> GBoolean { unsafe { g_struct_info_is_gtype_struct(info) } } pub fn
(info: *GIStructInfo) -> GBoolean { unsafe { g_struct_info_is_foreign(info) } } /// Convert GIBaseInfo to GIStructInfo. pub fn to_gi_struct_info<T>(object: *T) -> *GIStructInfo { unsafe { transmute(object) } }
is_foreign
identifier_name
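Editorial note on the structinfo.rs records above: they predate Rust 1.0, so they use bare `*T` pointers, the removed `int` type, and `&str::with_c_str`, none of which exist in current Rust. The NUL-terminated-string handoff that `find_method` performs with `with_c_str` is done today with `std::ffi::CString`. The sketch below shows that pattern only; `fake_find_method` is a locally defined mock (not part of the real girepository bindings) so the example compiles and runs on its own.

```rust
use std::ffi::CString;
use std::os::raw::c_char;

// Mock of an extern "C" lookup such as g_struct_info_find_method, defined here
// so the sketch runs without girepository; it just measures the C string.
unsafe extern "C" fn fake_find_method(name: *const c_char) -> usize {
    let mut len = 0;
    while *name.add(len) != 0 {
        len += 1;
    }
    len
}

/// Safe wrapper: build a NUL-terminated copy of `name` and hand C a pointer to it,
/// the modern equivalent of the old `with_c_str` closure in `find_method` above.
fn find_method(name: &str) -> usize {
    let c_name = CString::new(name).expect("name must not contain interior NUL bytes");
    unsafe { fake_find_method(c_name.as_ptr()) }
}

fn main() {
    assert_eq!(find_method("get_type"), 8);
}
```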
mod.rs
use std::slice; use libc::ptrdiff_t; use md5 as md5_crate; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use remacs_macros::lisp_fn; use crate::{ buffers::{LispBufferOrName, LispBufferRef}, lisp::LispObject, multibyte::LispStringRef, remacs_sys::EmacsInt, remacs_sys::{extract_data_from_object, make_uninit_string}, remacs_sys::{Qmd5, Qnil, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512}, symbols::{symbol_name, LispSymbolRef}, threads::ThreadState, }; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispSymbolRef) -> HashAlg { match LispObject::from(algorithm) { Qmd5 => HashAlg::MD5, Qsha1 => HashAlg::SHA1, Qsha224 => HashAlg::SHA224, Qsha256 => HashAlg::SHA256, Qsha384 => HashAlg::SHA384, Qsha512 => HashAlg::SHA512, _ => { let name: LispStringRef = symbol_name(algorithm).into(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. /// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, Qnil, ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. 
#[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispSymbolRef, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash(hash_alg(algorithm), object, start, end, Qnil, Qnil, binary) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { type HashFn = fn(&[u8], &mut [u8]); let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec, &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = unsafe { make_uninit_string(buffer_size as EmacsInt) }; let mut digest_str: LispStringRef = digest.into(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5_crate::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. fn sha2_hash_buffer(hasher: impl Digest, buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8])
fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. #[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: Option<LispBufferOrName>) -> LispObject { let b = buffer_or_name.map_or_else(ThreadState::current_buffer_unchecked, LispBufferRef::from); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), b.z_addr() as usize - b.gap_end_addr() as usize, ) }); } let formatted = ctx.digest().to_string(); let digest = unsafe { make_uninit_string(formatted.len() as EmacsInt) }; digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
{ sha2_hash_buffer(Sha384::new(), buffer, dest_buf); }
identifier_body
mod.rs
use std::slice; use libc::ptrdiff_t; use md5 as md5_crate; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use remacs_macros::lisp_fn; use crate::{ buffers::{LispBufferOrName, LispBufferRef}, lisp::LispObject, multibyte::LispStringRef, remacs_sys::EmacsInt, remacs_sys::{extract_data_from_object, make_uninit_string}, remacs_sys::{Qmd5, Qnil, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512}, symbols::{symbol_name, LispSymbolRef}, threads::ThreadState, }; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispSymbolRef) -> HashAlg { match LispObject::from(algorithm) { Qmd5 => HashAlg::MD5, Qsha1 => HashAlg::SHA1, Qsha224 => HashAlg::SHA224, Qsha256 => HashAlg::SHA256, Qsha384 => HashAlg::SHA384, Qsha512 => HashAlg::SHA512, _ => { let name: LispStringRef = symbol_name(algorithm).into(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. /// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, Qnil, ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. 
#[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispSymbolRef, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash(hash_alg(algorithm), object, start, end, Qnil, Qnil, binary) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { type HashFn = fn(&[u8], &mut [u8]); let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec, &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = unsafe { make_uninit_string(buffer_size as EmacsInt) }; let mut digest_str: LispStringRef = digest.into(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5_crate::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. fn sha2_hash_buffer(hasher: impl Digest, buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) }
sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. #[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: Option<LispBufferOrName>) -> LispObject { let b = buffer_or_name.map_or_else(ThreadState::current_buffer_unchecked, LispBufferRef::from); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), b.z_addr() as usize - b.gap_end_addr() as usize, ) }); } let formatted = ctx.digest().to_string(); let digest = unsafe { make_uninit_string(formatted.len() as EmacsInt) }; digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
random_line_split
mod.rs
use std::slice; use libc::ptrdiff_t; use md5 as md5_crate; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use remacs_macros::lisp_fn; use crate::{ buffers::{LispBufferOrName, LispBufferRef}, lisp::LispObject, multibyte::LispStringRef, remacs_sys::EmacsInt, remacs_sys::{extract_data_from_object, make_uninit_string}, remacs_sys::{Qmd5, Qnil, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512}, symbols::{symbol_name, LispSymbolRef}, threads::ThreadState, }; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispSymbolRef) -> HashAlg { match LispObject::from(algorithm) { Qmd5 => HashAlg::MD5, Qsha1 => HashAlg::SHA1, Qsha224 => HashAlg::SHA224, Qsha256 => HashAlg::SHA256, Qsha384 => HashAlg::SHA384, Qsha512 => HashAlg::SHA512, _ => { let name: LispStringRef = symbol_name(algorithm).into(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. /// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, Qnil, ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. #[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispSymbolRef, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash(hash_alg(algorithm), object, start, end, Qnil, Qnil, binary) } fn
( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { type HashFn = fn(&[u8], &mut [u8]); let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec, &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = unsafe { make_uninit_string(buffer_size as EmacsInt) }; let mut digest_str: LispStringRef = digest.into(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5_crate::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. fn sha2_hash_buffer(hasher: impl Digest, buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. 
#[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: Option<LispBufferOrName>) -> LispObject { let b = buffer_or_name.map_or_else(ThreadState::current_buffer_unchecked, LispBufferRef::from); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), b.z_addr() as usize - b.gap_end_addr() as usize, ) }); } let formatted = ctx.digest().to_string(); let digest = unsafe { make_uninit_string(formatted.len() as EmacsInt) }; digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
_secure_hash
identifier_name
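Editorial note: in `_secure_hash` above, the algorithm choice is resolved once into a `(digest_size, HashFn)` pair, where `HashFn` is a plain function pointer `fn(&[u8], &mut [u8])`, and everything after the `match` is algorithm-agnostic. Below is a rough, std-only sketch of that dispatch shape; `xor_fold` and `sum_fold` are throwaway one-byte "digests" standing in for the real MD5/SHA functions, not actual hashes.

```rust
// Toy stand-ins for md5_buffer / sha1_buffer: each writes a one-byte "digest".
type HashFn = fn(&[u8], &mut [u8]);

fn xor_fold(data: &[u8], out: &mut [u8]) {
    out[0] = data.iter().fold(0u8, |acc, b| acc ^ *b);
}

fn sum_fold(data: &[u8], out: &mut [u8]) {
    out[0] = data.iter().fold(0u8, |acc, b| acc.wrapping_add(*b));
}

fn digest(kind: &str, data: &[u8]) -> Vec<u8> {
    // Resolve the choice once into a (size, function pointer) pair, as _secure_hash does.
    let (digest_size, hash_func): (usize, HashFn) = match kind {
        "xor" => (1, xor_fold),
        "sum" => (1, sum_fold),
        _ => panic!("unknown digest kind"),
    };
    let mut out = vec![0u8; digest_size];
    hash_func(data, &mut out);
    out
}

fn main() {
    assert_eq!(digest("xor", b"abc"), vec![b'a' ^ b'b' ^ b'c']);
    assert_eq!(digest("sum", b"abc")[0], b'a'.wrapping_add(b'b').wrapping_add(b'c'));
}
```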
mod.rs
use std::slice; use libc::ptrdiff_t; use md5 as md5_crate; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use remacs_macros::lisp_fn; use crate::{ buffers::{LispBufferOrName, LispBufferRef}, lisp::LispObject, multibyte::LispStringRef, remacs_sys::EmacsInt, remacs_sys::{extract_data_from_object, make_uninit_string}, remacs_sys::{Qmd5, Qnil, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512}, symbols::{symbol_name, LispSymbolRef}, threads::ThreadState, }; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispSymbolRef) -> HashAlg { match LispObject::from(algorithm) { Qmd5 => HashAlg::MD5, Qsha1 => HashAlg::SHA1, Qsha224 => HashAlg::SHA224, Qsha256 => HashAlg::SHA256, Qsha384 => HashAlg::SHA384, Qsha512 => HashAlg::SHA512, _ => { let name: LispStringRef = symbol_name(algorithm).into(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. /// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, Qnil, ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. 
#[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispSymbolRef, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash(hash_alg(algorithm), object, start, end, Qnil, Qnil, binary) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { type HashFn = fn(&[u8], &mut [u8]); let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec, &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = unsafe { make_uninit_string(buffer_size as EmacsInt) }; let mut digest_str: LispStringRef = digest.into(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil()
digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5_crate::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. fn sha2_hash_buffer(hasher: impl Digest, buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. #[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: Option<LispBufferOrName>) -> LispObject { let b = buffer_or_name.map_or_else(ThreadState::current_buffer_unchecked, LispBufferRef::from); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), b.z_addr() as usize - b.gap_end_addr() as usize, ) }); } let formatted = ctx.digest().to_string(); let digest = unsafe { make_uninit_string(formatted.len() as EmacsInt) }; digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
{ hexify_digest_string(digest_str.as_mut_slice(), digest_size); }
conditional_block
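Editorial note: `hexify_digest_string` above expands the digest to hex in place, and the loop has to run in reverse. Expanding index 0 writes into index 1, which still holds a raw digest byte if the loop went forwards; going right-to-left, every raw byte that could be overwritten has already been expanded. A standalone sketch of the same trick with made-up input:

```rust
// Same idea as hexify_digest_string: the first `len` bytes are the raw digest,
// the buffer is 2*len bytes long, and expansion runs right-to-left so a raw
// byte is never overwritten before it has been read.
fn hexify_in_place(buffer: &mut [u8], len: usize) {
    const HEX: &[u8; 16] = b"0123456789abcdef";
    assert_eq!(buffer.len(), 2 * len);
    for i in (0..len).rev() {
        let v = buffer[i];
        buffer[2 * i] = HEX[(v >> 4) as usize];
        buffer[2 * i + 1] = HEX[(v & 0xf) as usize];
    }
}

fn main() {
    let mut buf = vec![0xde, 0xad, 0, 0]; // two raw bytes plus two spare bytes
    hexify_in_place(&mut buf, 2);
    assert_eq!(buf, b"dead".to_vec());
}
```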
a.rs
use std::fs::File; use std::io::prelude::*; use std::io::BufReader; use std::error::Error; static KEYPAD_MAP: [[u8; 3]; 3] = [ [1, 2, 3], [4, 5, 6], [7, 8, 9]]; struct Turtle { x: i8, y: i8, code: String, } impl Turtle { fn new() -> Turtle { Turtle { x: 1, y: 1, code: String::new(), }
} fn set_x(&mut self, x: i8) { if self.x + x <= 2 && self.x + x >= 0 { self.x = self.x + x; } } fn set_y(&mut self, y: i8) { if self.y + y <= 2 && self.y + y >= 0{ self.y = self.y + y; } } fn move_turtle(&mut self, direction: &char) { match direction.to_lowercase().collect::<String>().as_str() { "r" => self.set_x(1), "l" => self.set_x(-1), "u" => self.set_y(-1), "d" => self.set_y(1), _ => panic!("not a valid direction!"), }; } fn press_button(&mut self) { self.code.push_str(KEYPAD_MAP[self.y as usize][self.x as usize].to_string().as_str()); } } fn main() { let mut turtle = Turtle::new(); let f = match File::open("input.txt") { Err(e) => panic!("open failed: {}", e.description()), Ok(file) => file, }; let reader = BufReader::new(f); for line in reader.lines() { let line: String = line.unwrap(); if!line.is_empty() { let digit: Vec<char> = line.trim().chars().collect(); //--------- may req different order for step in &digit { turtle.move_turtle(step); } turtle.press_button(); } } println!("code: {}", turtle.code); }
random_line_split
a.rs
use std::fs::File; use std::io::prelude::*; use std::io::BufReader; use std::error::Error; static KEYPAD_MAP: [[u8; 3]; 3] = [ [1, 2, 3], [4, 5, 6], [7, 8, 9]]; struct Turtle { x: i8, y: i8, code: String, } impl Turtle { fn new() -> Turtle { Turtle { x: 1, y: 1, code: String::new(), } } fn set_x(&mut self, x: i8) { if self.x + x <= 2 && self.x + x >= 0 { self.x = self.x + x; } } fn set_y(&mut self, y: i8) { if self.y + y <= 2 && self.y + y >= 0{ self.y = self.y + y; } } fn move_turtle(&mut self, direction: &char)
fn press_button(&mut self) { self.code.push_str(KEYPAD_MAP[self.y as usize][self.x as usize].to_string().as_str()); } } fn main() { let mut turtle = Turtle::new(); let f = match File::open("input.txt") { Err(e) => panic!("open failed: {}", e.description()), Ok(file) => file, }; let reader = BufReader::new(f); for line in reader.lines() { let line: String = line.unwrap(); if!line.is_empty() { let digit: Vec<char> = line.trim().chars().collect(); //--------- may req different order for step in &digit { turtle.move_turtle(step); } turtle.press_button(); } } println!("code: {}", turtle.code); }
{ match direction.to_lowercase().collect::<String>().as_str() { "r" => self.set_x(1), "l" => self.set_x(-1), "u" => self.set_y(-1), "d" => self.set_y(1), _ => panic!("not a valid direction!"), }; }
identifier_body
a.rs
use std::fs::File; use std::io::prelude::*; use std::io::BufReader; use std::error::Error; static KEYPAD_MAP: [[u8; 3]; 3] = [ [1, 2, 3], [4, 5, 6], [7, 8, 9]]; struct Turtle { x: i8, y: i8, code: String, } impl Turtle { fn new() -> Turtle { Turtle { x: 1, y: 1, code: String::new(), } } fn set_x(&mut self, x: i8) { if self.x + x <= 2 && self.x + x >= 0 { self.x = self.x + x; } } fn set_y(&mut self, y: i8) { if self.y + y <= 2 && self.y + y >= 0{ self.y = self.y + y; } } fn
(&mut self, direction: &char) { match direction.to_lowercase().collect::<String>().as_str() { "r" => self.set_x(1), "l" => self.set_x(-1), "u" => self.set_y(-1), "d" => self.set_y(1), _ => panic!("not a valid direction!"), }; } fn press_button(&mut self) { self.code.push_str(KEYPAD_MAP[self.y as usize][self.x as usize].to_string().as_str()); } } fn main() { let mut turtle = Turtle::new(); let f = match File::open("input.txt") { Err(e) => panic!("open failed: {}", e.description()), Ok(file) => file, }; let reader = BufReader::new(f); for line in reader.lines() { let line: String = line.unwrap(); if!line.is_empty() { let digit: Vec<char> = line.trim().chars().collect(); //--------- may req different order for step in &digit { turtle.move_turtle(step); } turtle.press_button(); } } println!("code: {}", turtle.code); }
move_turtle
identifier_name
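Editorial note: the a.rs records are the clamped keypad walk from Advent of Code 2016, day 2. Below is a self-contained replay of the same logic, collapsed into one function so it runs without input.txt; the four instruction strings are the puzzle's well-known sample, not the repo's real input.

```rust
// Clamped 3x3 keypad walk: start on "5", moves that would leave the grid are ignored.
fn bathroom_code(lines: &[&str]) -> String {
    let keypad = [[1u8, 2, 3], [4, 5, 6], [7, 8, 9]];
    let (mut x, mut y): (i8, i8) = (1, 1); // start on the "5" key
    let mut code = String::new();
    for line in lines {
        for step in line.chars() {
            let (dx, dy) = match step.to_ascii_uppercase() {
                'R' => (1i8, 0i8),
                'L' => (-1, 0),
                'U' => (0, -1),
                'D' => (0, 1),
                _ => panic!("not a valid direction!"),
            };
            if x + dx >= 0 && x + dx <= 2 { x += dx; } // clamp to the grid
            if y + dy >= 0 && y + dy <= 2 { y += dy; }
        }
        code.push_str(&keypad[y as usize][x as usize].to_string());
    }
    code
}

fn main() {
    assert_eq!(bathroom_code(&["ULL", "RRDDD", "LURDL", "UUUUD"]), "1985");
}
```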
atomic_nand_acq.rs
#![feature(core, core_intrinsics)] extern crate core; #[cfg(test)] mod tests { use core::intrinsics::atomic_nand_acq; use core::cell::UnsafeCell; use std::sync::Arc; use std::thread;
// pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T; struct A<T> { v: UnsafeCell<T> } unsafe impl Sync for A<T> {} impl<T> A<T> { fn new(v: T) -> A<T> { A { v: UnsafeCell::<T>::new(v) } } } type T = usize; macro_rules! atomic_nand_acq_test { ($init:expr, $value:expr, $result:expr) => ({ let value: T = $init; let a: A<T> = A::<T>::new(value); let data: Arc<A<T>> = Arc::<A<T>>::new(a); let clone: Arc<A<T>> = data.clone(); thread::spawn(move || { let dst: *mut T = clone.v.get(); let src: T = $value; let old: T = unsafe { atomic_nand_acq::<T>(dst, src) }; assert_eq!(old, $init); }); thread::sleep_ms(10); let ptr: *mut T = data.v.get(); assert_eq!(unsafe { *ptr }, $result); }) } #[test] fn atomic_nand_acq_test1() { atomic_nand_acq_test!( 0xff00, 0x0a0a,!0x0a00 ); } }
random_line_split
atomic_nand_acq.rs
#![feature(core, core_intrinsics)] extern crate core; #[cfg(test)] mod tests { use core::intrinsics::atomic_nand_acq; use core::cell::UnsafeCell; use std::sync::Arc; use std::thread; // pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T; struct A<T> { v: UnsafeCell<T> } unsafe impl Sync for A<T> {} impl<T> A<T> { fn
(v: T) -> A<T> { A { v: UnsafeCell::<T>::new(v) } } } type T = usize; macro_rules! atomic_nand_acq_test { ($init:expr, $value:expr, $result:expr) => ({ let value: T = $init; let a: A<T> = A::<T>::new(value); let data: Arc<A<T>> = Arc::<A<T>>::new(a); let clone: Arc<A<T>> = data.clone(); thread::spawn(move || { let dst: *mut T = clone.v.get(); let src: T = $value; let old: T = unsafe { atomic_nand_acq::<T>(dst, src) }; assert_eq!(old, $init); }); thread::sleep_ms(10); let ptr: *mut T = data.v.get(); assert_eq!(unsafe { *ptr }, $result); }) } #[test] fn atomic_nand_acq_test1() { atomic_nand_acq_test!( 0xff00, 0x0a0a,!0x0a00 ); } }
new
identifier_name
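Editorial note: the atomic_nand_acq.rs test needs nightly intrinsics, an `UnsafeCell` wrapper, and a sleeping thread just to observe one NAND. On current stable Rust the same operation is exposed as `fetch_nand` on the atomic integer types: it stores `!(old & src)` with the requested ordering and returns the previous value, so the assertion above reduces to a couple of lines (a single-threaded sketch for brevity):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let a = AtomicUsize::new(0xff00);
    // Stores !(old & src) with acquire ordering and returns the previous value.
    let old = a.fetch_nand(0x0a0a, Ordering::Acquire);
    assert_eq!(old, 0xff00);
    assert_eq!(a.load(Ordering::Relaxed), !(0xff00 & 0x0a0a)); // i.e. !0x0a00
}
```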
headless.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use compositing::*; use geom::size::Size2D; use servo_msg::constellation_msg::{ConstellationChan, ExitMsg, ResizedWindowMsg}; use std::comm::{Empty, Disconnected, Data, Receiver}; use servo_util::time::ProfilerChan; use servo_util::time; /// Starts the compositor, which listens for messages on the specified port. /// /// This is the null compositor which doesn't draw anything to the screen. /// It's intended for headless testing. pub struct NullCompositor { /// The port on which we receive messages. port: Receiver<Msg>, } impl NullCompositor { fn new(port: Receiver<Msg>) -> NullCompositor { NullCompositor { port: port, } } pub fn create(port: Receiver<Msg>, constellation_chan: ConstellationChan, profiler_chan: ProfilerChan)
} fn handle_message(&self, constellation_chan: ConstellationChan) { loop { match self.port.recv() { Exit(chan) => { debug!("shutting down the constellation"); let ConstellationChan(ref con_chan) = constellation_chan; con_chan.send(ExitMsg); chan.send(()); } ShutdownComplete => { debug!("constellation completed shutdown"); break } GetGraphicsMetadata(chan) => { chan.send(None); } SetIds(_, response_chan, _) => { response_chan.send(()); } // Explicitly list ignored messages so that when we add a new one, // we'll notice and think about whether it needs a response, like // SetIds. CreateRootCompositorLayerIfNecessary(..) | CreateDescendantCompositorLayerIfNecessary(..) | SetLayerPageSize(..) | SetLayerClipRect(..) | DeleteLayerGroup(..) | Paint(..) | InvalidateRect(..) | ChangeReadyState(..) | ChangeRenderState(..) | ScrollFragmentPoint(..) | SetUnRenderedColor(..) | LoadComplete(..) => () } } } }
{ let compositor = NullCompositor::new(port); // Tell the constellation about the initial fake size. { let ConstellationChan(ref chan) = constellation_chan; chan.send(ResizedWindowMsg(Size2D(640u, 480u))); } compositor.handle_message(constellation_chan); // Drain compositor port, sometimes messages contain channels that are blocking // another task from finishing (i.e. SetIds) loop { match compositor.port.try_recv() { Empty | Disconnected => break, Data(_) => {}, } } profiler_chan.send(time::ExitMsg);
identifier_body
headless.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use compositing::*; use geom::size::Size2D; use servo_msg::constellation_msg::{ConstellationChan, ExitMsg, ResizedWindowMsg}; use std::comm::{Empty, Disconnected, Data, Receiver}; use servo_util::time::ProfilerChan; use servo_util::time; /// Starts the compositor, which listens for messages on the specified port. /// /// This is the null compositor which doesn't draw anything to the screen. /// It's intended for headless testing. pub struct NullCompositor { /// The port on which we receive messages. port: Receiver<Msg>, } impl NullCompositor { fn new(port: Receiver<Msg>) -> NullCompositor { NullCompositor { port: port, } } pub fn create(port: Receiver<Msg>, constellation_chan: ConstellationChan, profiler_chan: ProfilerChan) { let compositor = NullCompositor::new(port); // Tell the constellation about the initial fake size. { let ConstellationChan(ref chan) = constellation_chan; chan.send(ResizedWindowMsg(Size2D(640u, 480u))); } compositor.handle_message(constellation_chan); // Drain compositor port, sometimes messages contain channels that are blocking // another task from finishing (i.e. SetIds) loop { match compositor.port.try_recv() { Empty | Disconnected => break, Data(_) => {}, } } profiler_chan.send(time::ExitMsg); } fn
(&self, constellation_chan: ConstellationChan) { loop { match self.port.recv() { Exit(chan) => { debug!("shutting down the constellation"); let ConstellationChan(ref con_chan) = constellation_chan; con_chan.send(ExitMsg); chan.send(()); } ShutdownComplete => { debug!("constellation completed shutdown"); break } GetGraphicsMetadata(chan) => { chan.send(None); } SetIds(_, response_chan, _) => { response_chan.send(()); } // Explicitly list ignored messages so that when we add a new one, // we'll notice and think about whether it needs a response, like // SetIds. CreateRootCompositorLayerIfNecessary(..) | CreateDescendantCompositorLayerIfNecessary(..) | SetLayerPageSize(..) | SetLayerClipRect(..) | DeleteLayerGroup(..) | Paint(..) | InvalidateRect(..) | ChangeReadyState(..) | ChangeRenderState(..) | ScrollFragmentPoint(..) | SetUnRenderedColor(..) | LoadComplete(..) => () } } } }
handle_message
identifier_name
headless.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use compositing::*; use geom::size::Size2D; use servo_msg::constellation_msg::{ConstellationChan, ExitMsg, ResizedWindowMsg}; use std::comm::{Empty, Disconnected, Data, Receiver}; use servo_util::time::ProfilerChan; use servo_util::time; /// Starts the compositor, which listens for messages on the specified port. /// /// This is the null compositor which doesn't draw anything to the screen. /// It's intended for headless testing. pub struct NullCompositor { /// The port on which we receive messages. port: Receiver<Msg>, } impl NullCompositor { fn new(port: Receiver<Msg>) -> NullCompositor { NullCompositor { port: port, } } pub fn create(port: Receiver<Msg>, constellation_chan: ConstellationChan, profiler_chan: ProfilerChan) { let compositor = NullCompositor::new(port); // Tell the constellation about the initial fake size. { let ConstellationChan(ref chan) = constellation_chan; chan.send(ResizedWindowMsg(Size2D(640u, 480u))); } compositor.handle_message(constellation_chan); // Drain compositor port, sometimes messages contain channels that are blocking // another task from finishing (i.e. SetIds) loop { match compositor.port.try_recv() { Empty | Disconnected => break, Data(_) => {}, } } profiler_chan.send(time::ExitMsg); } fn handle_message(&self, constellation_chan: ConstellationChan) { loop { match self.port.recv() { Exit(chan) => { debug!("shutting down the constellation"); let ConstellationChan(ref con_chan) = constellation_chan; con_chan.send(ExitMsg); chan.send(()); } ShutdownComplete => { debug!("constellation completed shutdown"); break } GetGraphicsMetadata(chan) => { chan.send(None); }
// Explicitly list ignored messages so that when we add a new one, // we'll notice and think about whether it needs a response, like // SetIds. CreateRootCompositorLayerIfNecessary(..) | CreateDescendantCompositorLayerIfNecessary(..) | SetLayerPageSize(..) | SetLayerClipRect(..) | DeleteLayerGroup(..) | Paint(..) | InvalidateRect(..) | ChangeReadyState(..) | ChangeRenderState(..) | ScrollFragmentPoint(..) | SetUnRenderedColor(..) | LoadComplete(..) => () } } } }
SetIds(_, response_chan, _) => { response_chan.send(()); }
random_line_split
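Editorial note: the `std::comm` API used in headless.rs, with `Empty | Disconnected | Data(_)` results from `try_recv`, disappeared before Rust 1.0. The non-blocking drain loop at the end of `create` maps onto today's `std::sync::mpsc` roughly as sketched below; the message values are made up, only the control flow mirrors the original.

```rust
use std::sync::mpsc::{channel, TryRecvError};

fn main() {
    let (tx, rx) = channel();
    tx.send("SetIds").unwrap();
    tx.send("Paint").unwrap();
    drop(tx); // the sending side has shut down, as after ExitMsg

    // Non-blocking drain: keep pulling until the port is empty or closed.
    loop {
        match rx.try_recv() {
            Ok(_msg) => {}
            Err(TryRecvError::Empty) | Err(TryRecvError::Disconnected) => break,
        }
    }
    assert!(rx.try_recv().is_err());
}
```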
poll_fn.rs
use crate::fmt; use crate::future::Future; use crate::pin::Pin; use crate::task::{Context, Poll}; /// Creates a future that wraps a function returning [`Poll`]. /// /// Polling the future delegates to the wrapped function. /// /// # Examples /// /// ``` /// #![feature(future_poll_fn)] /// # async fn run() { /// use core::future::poll_fn; /// use std::task::{Context, Poll}; /// /// fn read_line(_cx: &mut Context<'_>) -> Poll<String> { /// Poll::Ready("Hello, World!".into()) /// } /// /// let read_future = poll_fn(read_line); /// assert_eq!(read_future.await, "Hello, World!".to_owned()); /// # } /// ``` #[unstable(feature = "future_poll_fn", issue = "72302")] pub fn poll_fn<T, F>(f: F) -> PollFn<F> where F: FnMut(&mut Context<'_>) -> Poll<T>, { PollFn { f } } /// A Future that wraps a function returning [`Poll`]. /// /// This `struct` is created by [`poll_fn()`]. See its /// documentation for more. #[must_use = "futures do nothing unless you `.await` or poll them"] #[unstable(feature = "future_poll_fn", issue = "72302")] pub struct PollFn<F> { f: F, } #[unstable(feature = "future_poll_fn", issue = "72302")] impl<F> Unpin for PollFn<F> {} #[unstable(feature = "future_poll_fn", issue = "72302")] impl<F> fmt::Debug for PollFn<F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PollFn").finish() } } #[unstable(feature = "future_poll_fn", issue = "72302")] impl<T, F> Future for PollFn<F> where F: FnMut(&mut Context<'_>) -> Poll<T>, { type Output = T; fn
(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> { (&mut self.f)(cx) } }
poll
identifier_name
poll_fn.rs
use crate::fmt; use crate::future::Future; use crate::pin::Pin; use crate::task::{Context, Poll}; /// Creates a future that wraps a function returning [`Poll`]. /// /// Polling the future delegates to the wrapped function. /// /// # Examples /// /// ``` /// #![feature(future_poll_fn)] /// # async fn run() { /// use core::future::poll_fn; /// use std::task::{Context, Poll}; /// /// fn read_line(_cx: &mut Context<'_>) -> Poll<String> { /// Poll::Ready("Hello, World!".into()) /// } /// /// let read_future = poll_fn(read_line); /// assert_eq!(read_future.await, "Hello, World!".to_owned()); /// # } /// ``` #[unstable(feature = "future_poll_fn", issue = "72302")] pub fn poll_fn<T, F>(f: F) -> PollFn<F> where F: FnMut(&mut Context<'_>) -> Poll<T>,
/// A Future that wraps a function returning [`Poll`]. /// /// This `struct` is created by [`poll_fn()`]. See its /// documentation for more. #[must_use = "futures do nothing unless you `.await` or poll them"] #[unstable(feature = "future_poll_fn", issue = "72302")] pub struct PollFn<F> { f: F, } #[unstable(feature = "future_poll_fn", issue = "72302")] impl<F> Unpin for PollFn<F> {} #[unstable(feature = "future_poll_fn", issue = "72302")] impl<F> fmt::Debug for PollFn<F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PollFn").finish() } } #[unstable(feature = "future_poll_fn", issue = "72302")] impl<T, F> Future for PollFn<F> where F: FnMut(&mut Context<'_>) -> Poll<T>, { type Output = T; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> { (&mut self.f)(cx) } }
{ PollFn { f } }
identifier_body
poll_fn.rs
use crate::fmt;
use crate::task::{Context, Poll}; /// Creates a future that wraps a function returning [`Poll`]. /// /// Polling the future delegates to the wrapped function. /// /// # Examples /// /// ``` /// #![feature(future_poll_fn)] /// # async fn run() { /// use core::future::poll_fn; /// use std::task::{Context, Poll}; /// /// fn read_line(_cx: &mut Context<'_>) -> Poll<String> { /// Poll::Ready("Hello, World!".into()) /// } /// /// let read_future = poll_fn(read_line); /// assert_eq!(read_future.await, "Hello, World!".to_owned()); /// # } /// ``` #[unstable(feature = "future_poll_fn", issue = "72302")] pub fn poll_fn<T, F>(f: F) -> PollFn<F> where F: FnMut(&mut Context<'_>) -> Poll<T>, { PollFn { f } } /// A Future that wraps a function returning [`Poll`]. /// /// This `struct` is created by [`poll_fn()`]. See its /// documentation for more. #[must_use = "futures do nothing unless you `.await` or poll them"] #[unstable(feature = "future_poll_fn", issue = "72302")] pub struct PollFn<F> { f: F, } #[unstable(feature = "future_poll_fn", issue = "72302")] impl<F> Unpin for PollFn<F> {} #[unstable(feature = "future_poll_fn", issue = "72302")] impl<F> fmt::Debug for PollFn<F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PollFn").finish() } } #[unstable(feature = "future_poll_fn", issue = "72302")] impl<T, F> Future for PollFn<F> where F: FnMut(&mut Context<'_>) -> Poll<T>, { type Output = T; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> { (&mut self.f)(cx) } }
use crate::future::Future; use crate::pin::Pin;
random_line_split
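Editorial note: the `poll_fn` shown in these records was later stabilized as `std::future::poll_fn`. The sketch below drives one such future to completion by hand; the no-op waker boilerplate is only there so a single `poll` can be issued without pulling in an executor crate, and the closure is the same trivial always-ready one from the doc example.

```rust
use std::future::{poll_fn, Future};
use std::pin::Pin;
use std::ptr;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A waker that does nothing: enough to poll a future that is already ready.
unsafe fn vt_clone(_: *const ()) -> RawWaker {
    noop_raw_waker()
}
unsafe fn vt_noop(_: *const ()) {}

static VTABLE: RawWakerVTable = RawWakerVTable::new(vt_clone, vt_noop, vt_noop, vt_noop);

fn noop_raw_waker() -> RawWaker {
    RawWaker::new(ptr::null(), &VTABLE)
}

fn main() {
    // The closure captures nothing, so the returned future is Unpin and Pin::new suffices.
    let mut fut = poll_fn(|_cx| Poll::Ready(String::from("Hello, World!")));

    let waker = unsafe { Waker::from_raw(noop_raw_waker()) };
    let mut cx = Context::from_waker(&waker);

    match Pin::new(&mut fut).poll(&mut cx) {
        Poll::Ready(s) => assert_eq!(s, "Hello, World!"),
        Poll::Pending => unreachable!("the closure above is always ready"),
    }
}
```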
mod.rs
pub mod vcx; pub mod connection; pub mod issuer_credential; pub mod utils; pub mod proof; pub mod credential_def; pub mod schema; pub mod credential; pub mod disclosed_proof; pub mod wallet; pub mod logger; pub mod return_types_u32; use std::fmt; /// This macro allows the VcxStateType to be /// serialized within serde as an integer (represented as /// a string, because its still JSON). macro_rules! enum_number { ($name:ident { $($variant:ident = $value:expr, )* }) => { #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum $name { $($variant = $value,)* } impl ::serde::Serialize for $name { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ::serde::Serializer { // Serialize the enum as a u64. serializer.serialize_u64(*self as u64) } } impl<'de> ::serde::Deserialize<'de> for $name { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: ::serde::Deserializer<'de> { struct Visitor; impl<'de> ::serde::de::Visitor<'de> for Visitor { type Value = $name; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("positive integer") } fn visit_u64<E>(self, value: u64) -> Result<$name, E> where E: ::serde::de::Error { // Rust does not come with a simple way of converting a // number to an enum, so use a big `match`. match value { $( $value => Ok($name::$variant), )* _ => Err(E::custom( format!("unknown {} value: {}", stringify!($name), value))), } } } // Deserialize the enum from a u64. deserializer.deserialize_u64(Visitor) } } } } enum_number!(VcxStateType { VcxStateNone = 0, VcxStateInitialized = 1, VcxStateOfferSent = 2, VcxStateRequestReceived = 3, VcxStateAccepted = 4, VcxStateUnfulfilled = 5, VcxStateExpired = 6, VcxStateRevoked = 7, }); // undefined is correlated with VcxStateNon -> Haven't received Proof // Validated is both validated by indy-sdk and by comparing proof-request // Invalid is that it failed one or both of validation processes enum_number!(ProofStateType { ProofUndefined = 0, ProofValidated = 1, ProofInvalid = 2, }); #[repr(C)] pub struct VcxStatus { pub handle: libc::c_int, pub status: libc::c_int, pub msg: *mut libc::c_char, } #[cfg(test)] mod tests { use super::*; use serde_json; use self::VcxStateType::*; #[test] fn
() { let z = VcxStateNone; let y = serde_json::to_string(&z).unwrap(); assert_eq!(y, "0"); } }
test_serialize_vcx_state_type
identifier_name
mod.rs
pub mod vcx; pub mod connection; pub mod issuer_credential; pub mod utils; pub mod proof; pub mod credential_def; pub mod schema; pub mod credential; pub mod disclosed_proof; pub mod wallet; pub mod logger; pub mod return_types_u32; use std::fmt; /// This macro allows the VcxStateType to be /// serialized within serde as an integer (represented as /// a string, because its still JSON). macro_rules! enum_number { ($name:ident { $($variant:ident = $value:expr, )* }) => { #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum $name { $($variant = $value,)* } impl ::serde::Serialize for $name { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ::serde::Serializer { // Serialize the enum as a u64. serializer.serialize_u64(*self as u64) } } impl<'de> ::serde::Deserialize<'de> for $name { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: ::serde::Deserializer<'de> { struct Visitor; impl<'de> ::serde::de::Visitor<'de> for Visitor { type Value = $name; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("positive integer") } fn visit_u64<E>(self, value: u64) -> Result<$name, E> where E: ::serde::de::Error { // Rust does not come with a simple way of converting a // number to an enum, so use a big `match`. match value { $( $value => Ok($name::$variant), )* _ => Err(E::custom( format!("unknown {} value: {}", stringify!($name), value))), } } } // Deserialize the enum from a u64. deserializer.deserialize_u64(Visitor) } } } } enum_number!(VcxStateType { VcxStateNone = 0, VcxStateInitialized = 1, VcxStateOfferSent = 2, VcxStateRequestReceived = 3, VcxStateAccepted = 4, VcxStateUnfulfilled = 5, VcxStateExpired = 6, VcxStateRevoked = 7, }); // undefined is correlated with VcxStateNon -> Haven't received Proof // Validated is both validated by indy-sdk and by comparing proof-request // Invalid is that it failed one or both of validation processes enum_number!(ProofStateType { ProofUndefined = 0, ProofValidated = 1, ProofInvalid = 2, }); #[repr(C)] pub struct VcxStatus { pub handle: libc::c_int,
#[cfg(test)] mod tests { use super::*; use serde_json; use self::VcxStateType::*; #[test] fn test_serialize_vcx_state_type(){ let z = VcxStateNone; let y = serde_json::to_string(&z).unwrap(); assert_eq!(y,"0"); } }
pub status: libc::c_int, pub msg: *mut libc::c_char, }
random_line_split
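Editorial note: the `enum_number!` macro in the vcx mod.rs records serializes each state as its integer value and deserializes it back through one big `match`, because, as the inline comment notes, Rust has no built-in integer-to-enum conversion. Stripped of serde, the round trip boils down to the sketch below; only two of the eight states are spelled out.

```rust
// `as` goes enum -> integer; an explicit match goes back.
#[derive(Clone, Copy, Debug, PartialEq)]
enum VcxStateType {
    VcxStateNone = 0,
    VcxStateInitialized = 1,
}

fn vcx_state_from_u64(value: u64) -> Option<VcxStateType> {
    match value {
        0 => Some(VcxStateType::VcxStateNone),
        1 => Some(VcxStateType::VcxStateInitialized),
        _ => None, // the macro's Visitor reports "unknown VcxStateType value" here
    }
}

fn main() {
    assert_eq!(VcxStateType::VcxStateNone as u64, 0);
    assert_eq!(vcx_state_from_u64(1), Some(VcxStateType::VcxStateInitialized));
    assert_eq!(vcx_state_from_u64(42), None);
}
```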
movelist.rs
// This file is part of the shakmaty library. // Copyright (C) 2017-2022 Niklas Fiekas <[email protected]> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful,
// GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use arrayvec::ArrayVec; use crate::types::Move; /// A container for moves that can be stored inline on the stack. /// /// The capacity is limited, but there is enough space to hold the legal /// moves of any chess position, including any of the supported chess variants, /// if enabled. /// /// # Example /// /// ``` /// use shakmaty::{Chess, Position, Role}; /// /// let pos = Chess::default(); /// let mut moves = pos.legal_moves(); /// moves.retain(|m| m.role() == Role::Pawn); /// assert_eq!(moves.len(), 16); /// ``` pub type MoveList = ArrayVec< Move, { #[cfg(feature = "variant")] { 512 } #[cfg(not(feature = "variant"))] { 256 } }, >;
// but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
random_line_split
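The movelist.rs record above picks the ArrayVec capacity with cfg blocks placed directly inside the const-generic argument. The following dependency-free sketch shows the same compile-time selection with a named const and a plain array instead of ArrayVec; the feature name is kept from the record, but any cfg flag works the same way. It is an illustration, not part of the record.

// Choose a stack capacity at compile time with cfg blocks inside a const
// expression, as the MoveList alias above does for its ArrayVec capacity.
const CAPACITY: usize = {
    #[cfg(feature = "variant")]
    {
        512
    }
    #[cfg(not(feature = "variant"))]
    {
        256
    }
};

// A fixed-size buffer standing in for the ArrayVec from the record.
type MoveBuffer = [u16; CAPACITY];

fn main() {
    let buf: MoveBuffer = [0; CAPACITY];
    // Without the "variant" feature enabled this prints 256.
    println!("capacity = {}", buf.len());
}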
node_count.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Simply gives a rought count of the number of nodes in an AST. use visit::*; use ast::*; use syntax_pos::Span; pub struct NodeCounter { pub count: usize, } impl NodeCounter { pub fn new() -> NodeCounter { NodeCounter { count: 0, } } } impl<'ast> Visitor<'ast> for NodeCounter { fn visit_ident(&mut self, span: Span, ident: Ident) { self.count += 1; walk_ident(self, span, ident); } fn visit_mod(&mut self, m: &Mod, _s: Span, _a: &[Attribute], _n: NodeId) { self.count += 1; walk_mod(self, m) } fn visit_foreign_item(&mut self, i: &ForeignItem) { self.count += 1; walk_foreign_item(self, i) } fn visit_item(&mut self, i: &Item) { self.count += 1; walk_item(self, i) } fn visit_local(&mut self, l: &Local) { self.count += 1; walk_local(self, l) } fn visit_block(&mut self, b: &Block) { self.count += 1; walk_block(self, b) } fn visit_stmt(&mut self, s: &Stmt) { self.count += 1; walk_stmt(self, s) } fn visit_arm(&mut self, a: &Arm) { self.count += 1; walk_arm(self, a) } fn visit_pat(&mut self, p: &Pat) { self.count += 1; walk_pat(self, p) } fn visit_expr(&mut self, ex: &Expr) { self.count += 1; walk_expr(self, ex) } fn visit_ty(&mut self, t: &Ty) { self.count += 1; walk_ty(self, t) } fn visit_generics(&mut self, g: &Generics) { self.count += 1; walk_generics(self, g) } fn visit_fn(&mut self, fk: FnKind, fd: &FnDecl, s: Span, _: NodeId) { self.count += 1; walk_fn(self, fk, fd, s) } fn visit_trait_item(&mut self, ti: &TraitItem) { self.count += 1; walk_trait_item(self, ti) } fn visit_impl_item(&mut self, ii: &ImplItem) { self.count += 1; walk_impl_item(self, ii) } fn visit_trait_ref(&mut self, t: &TraitRef) { self.count += 1; walk_trait_ref(self, t) } fn visit_ty_param_bound(&mut self, bounds: &TyParamBound) { self.count += 1; walk_ty_param_bound(self, bounds) } fn visit_poly_trait_ref(&mut self, t: &PolyTraitRef, m: &TraitBoundModifier) { self.count += 1; walk_poly_trait_ref(self, t, m) } fn visit_variant_data(&mut self, s: &VariantData, _: Ident, _: &Generics, _: NodeId, _: Span) { self.count += 1; walk_struct_def(self, s) } fn visit_struct_field(&mut self, s: &StructField) { self.count += 1; walk_struct_field(self, s) } fn visit_enum_def(&mut self, enum_definition: &EnumDef, generics: &Generics, item_id: NodeId, _: Span) { self.count += 1; walk_enum_def(self, enum_definition, generics, item_id) } fn visit_variant(&mut self, v: &Variant, g: &Generics, item_id: NodeId) { self.count += 1; walk_variant(self, v, g, item_id) } fn visit_lifetime(&mut self, lifetime: &Lifetime) { self.count += 1; walk_lifetime(self, lifetime) } fn visit_lifetime_def(&mut self, lifetime: &LifetimeDef) { self.count += 1; walk_lifetime_def(self, lifetime) } fn visit_mac(&mut self, _mac: &Mac) { self.count += 1; walk_mac(self, _mac) } fn visit_path(&mut self, path: &Path, _id: NodeId)
fn visit_path_list_item(&mut self, prefix: &Path, item: &PathListItem) { self.count += 1; walk_path_list_item(self, prefix, item) } fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &PathParameters) { self.count += 1; walk_path_parameters(self, path_span, path_parameters) } fn visit_assoc_type_binding(&mut self, type_binding: &TypeBinding) { self.count += 1; walk_assoc_type_binding(self, type_binding) } fn visit_attribute(&mut self, _attr: &Attribute) { self.count += 1; } }
{ self.count += 1; walk_path(self, path) }
identifier_body
node_count.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Simply gives a rought count of the number of nodes in an AST. use visit::*; use ast::*; use syntax_pos::Span; pub struct NodeCounter { pub count: usize, } impl NodeCounter { pub fn new() -> NodeCounter { NodeCounter { count: 0, } } } impl<'ast> Visitor<'ast> for NodeCounter { fn visit_ident(&mut self, span: Span, ident: Ident) { self.count += 1; walk_ident(self, span, ident); } fn visit_mod(&mut self, m: &Mod, _s: Span, _a: &[Attribute], _n: NodeId) { self.count += 1; walk_mod(self, m) } fn visit_foreign_item(&mut self, i: &ForeignItem) { self.count += 1; walk_foreign_item(self, i) } fn visit_item(&mut self, i: &Item) { self.count += 1; walk_item(self, i) } fn visit_local(&mut self, l: &Local) { self.count += 1; walk_local(self, l) } fn
(&mut self, b: &Block) { self.count += 1; walk_block(self, b) } fn visit_stmt(&mut self, s: &Stmt) { self.count += 1; walk_stmt(self, s) } fn visit_arm(&mut self, a: &Arm) { self.count += 1; walk_arm(self, a) } fn visit_pat(&mut self, p: &Pat) { self.count += 1; walk_pat(self, p) } fn visit_expr(&mut self, ex: &Expr) { self.count += 1; walk_expr(self, ex) } fn visit_ty(&mut self, t: &Ty) { self.count += 1; walk_ty(self, t) } fn visit_generics(&mut self, g: &Generics) { self.count += 1; walk_generics(self, g) } fn visit_fn(&mut self, fk: FnKind, fd: &FnDecl, s: Span, _: NodeId) { self.count += 1; walk_fn(self, fk, fd, s) } fn visit_trait_item(&mut self, ti: &TraitItem) { self.count += 1; walk_trait_item(self, ti) } fn visit_impl_item(&mut self, ii: &ImplItem) { self.count += 1; walk_impl_item(self, ii) } fn visit_trait_ref(&mut self, t: &TraitRef) { self.count += 1; walk_trait_ref(self, t) } fn visit_ty_param_bound(&mut self, bounds: &TyParamBound) { self.count += 1; walk_ty_param_bound(self, bounds) } fn visit_poly_trait_ref(&mut self, t: &PolyTraitRef, m: &TraitBoundModifier) { self.count += 1; walk_poly_trait_ref(self, t, m) } fn visit_variant_data(&mut self, s: &VariantData, _: Ident, _: &Generics, _: NodeId, _: Span) { self.count += 1; walk_struct_def(self, s) } fn visit_struct_field(&mut self, s: &StructField) { self.count += 1; walk_struct_field(self, s) } fn visit_enum_def(&mut self, enum_definition: &EnumDef, generics: &Generics, item_id: NodeId, _: Span) { self.count += 1; walk_enum_def(self, enum_definition, generics, item_id) } fn visit_variant(&mut self, v: &Variant, g: &Generics, item_id: NodeId) { self.count += 1; walk_variant(self, v, g, item_id) } fn visit_lifetime(&mut self, lifetime: &Lifetime) { self.count += 1; walk_lifetime(self, lifetime) } fn visit_lifetime_def(&mut self, lifetime: &LifetimeDef) { self.count += 1; walk_lifetime_def(self, lifetime) } fn visit_mac(&mut self, _mac: &Mac) { self.count += 1; walk_mac(self, _mac) } fn visit_path(&mut self, path: &Path, _id: NodeId) { self.count += 1; walk_path(self, path) } fn visit_path_list_item(&mut self, prefix: &Path, item: &PathListItem) { self.count += 1; walk_path_list_item(self, prefix, item) } fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &PathParameters) { self.count += 1; walk_path_parameters(self, path_span, path_parameters) } fn visit_assoc_type_binding(&mut self, type_binding: &TypeBinding) { self.count += 1; walk_assoc_type_binding(self, type_binding) } fn visit_attribute(&mut self, _attr: &Attribute) { self.count += 1; } }
visit_block
identifier_name
node_count.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Simply gives a rought count of the number of nodes in an AST. use visit::*; use ast::*; use syntax_pos::Span; pub struct NodeCounter { pub count: usize, } impl NodeCounter { pub fn new() -> NodeCounter { NodeCounter { count: 0, } } } impl<'ast> Visitor<'ast> for NodeCounter { fn visit_ident(&mut self, span: Span, ident: Ident) { self.count += 1; walk_ident(self, span, ident); } fn visit_mod(&mut self, m: &Mod, _s: Span, _a: &[Attribute], _n: NodeId) { self.count += 1; walk_mod(self, m) } fn visit_foreign_item(&mut self, i: &ForeignItem) { self.count += 1; walk_foreign_item(self, i) } fn visit_item(&mut self, i: &Item) { self.count += 1; walk_item(self, i) } fn visit_local(&mut self, l: &Local) { self.count += 1; walk_local(self, l) } fn visit_block(&mut self, b: &Block) { self.count += 1; walk_block(self, b) } fn visit_stmt(&mut self, s: &Stmt) { self.count += 1; walk_stmt(self, s) } fn visit_arm(&mut self, a: &Arm) { self.count += 1; walk_arm(self, a) } fn visit_pat(&mut self, p: &Pat) { self.count += 1; walk_pat(self, p) } fn visit_expr(&mut self, ex: &Expr) { self.count += 1; walk_expr(self, ex) } fn visit_ty(&mut self, t: &Ty) { self.count += 1; walk_ty(self, t) } fn visit_generics(&mut self, g: &Generics) { self.count += 1; walk_generics(self, g) } fn visit_fn(&mut self, fk: FnKind, fd: &FnDecl, s: Span, _: NodeId) { self.count += 1; walk_fn(self, fk, fd, s) } fn visit_trait_item(&mut self, ti: &TraitItem) { self.count += 1; walk_trait_item(self, ti) } fn visit_impl_item(&mut self, ii: &ImplItem) { self.count += 1; walk_impl_item(self, ii) } fn visit_trait_ref(&mut self, t: &TraitRef) { self.count += 1; walk_trait_ref(self, t) } fn visit_ty_param_bound(&mut self, bounds: &TyParamBound) { self.count += 1; walk_ty_param_bound(self, bounds) } fn visit_poly_trait_ref(&mut self, t: &PolyTraitRef, m: &TraitBoundModifier) { self.count += 1; walk_poly_trait_ref(self, t, m) } fn visit_variant_data(&mut self, s: &VariantData, _: Ident, _: &Generics, _: NodeId, _: Span) { self.count += 1; walk_struct_def(self, s) } fn visit_struct_field(&mut self, s: &StructField) { self.count += 1; walk_struct_field(self, s) } fn visit_enum_def(&mut self, enum_definition: &EnumDef, generics: &Generics, item_id: NodeId, _: Span) { self.count += 1; walk_enum_def(self, enum_definition, generics, item_id) } fn visit_variant(&mut self, v: &Variant, g: &Generics, item_id: NodeId) { self.count += 1; walk_variant(self, v, g, item_id) } fn visit_lifetime(&mut self, lifetime: &Lifetime) { self.count += 1; walk_lifetime(self, lifetime) } fn visit_lifetime_def(&mut self, lifetime: &LifetimeDef) { self.count += 1; walk_lifetime_def(self, lifetime) } fn visit_mac(&mut self, _mac: &Mac) { self.count += 1; walk_mac(self, _mac) } fn visit_path(&mut self, path: &Path, _id: NodeId) { self.count += 1; walk_path(self, path) } fn visit_path_list_item(&mut self, prefix: &Path, item: &PathListItem) { self.count += 1; walk_path_list_item(self, prefix, item) } fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &PathParameters) { self.count += 1; 
walk_path_parameters(self, path_span, path_parameters)
walk_assoc_type_binding(self, type_binding) } fn visit_attribute(&mut self, _attr: &Attribute) { self.count += 1; } }
} fn visit_assoc_type_binding(&mut self, type_binding: &TypeBinding) { self.count += 1;
random_line_split
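The three node_count.rs records above are different splits of the same visitor: every visit_* method bumps a counter and then delegates to the matching walk_* helper so the traversal continues. Here is a self-contained toy version of that count-and-delegate pattern over a two-variant expression tree; the AST below is invented for illustration, not the libsyntax one.

// Toy version of the count-and-delegate visitor pattern used by NodeCounter:
// each visit_* method counts the node and then recurses via a walk_* helper.
enum Expr {
    Num(i64),
    Add(Box<Expr>, Box<Expr>),
}

trait Visitor {
    fn visit_expr(&mut self, e: &Expr) {
        walk_expr(self, e); // default: just keep walking
    }
}

fn walk_expr<V: Visitor + ?Sized>(v: &mut V, e: &Expr) {
    if let Expr::Add(lhs, rhs) = e {
        v.visit_expr(lhs);
        v.visit_expr(rhs);
    }
}

struct NodeCounter {
    count: usize,
}

impl Visitor for NodeCounter {
    fn visit_expr(&mut self, e: &Expr) {
        self.count += 1;    // count this node
        walk_expr(self, e); // then descend into its children
    }
}

fn main() {
    let ast = Expr::Add(Box::new(Expr::Num(1)), Box::new(Expr::Num(2)));
    let mut counter = NodeCounter { count: 0 };
    counter.visit_expr(&ast);
    assert_eq!(counter.count, 3); // the Add node plus two Num leaves
}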
preprocessor.rs
use std::vec::Vec; use std::string::String; use std::collections::HashMap; //Returns a string with comments removed, with everything in lowercase pub fn sanitize_line(input: &mut String) { while let Some(i) = input.find(',') { input.remove(i); } match input.find(';') { Some(i) => *input = input[0..i].to_string(), None => {} } *input = input.trim().to_string().to_lowercase(); } //Preprocessor should support #define VAL xyz pub fn preprocess(input: &Vec<&str>) -> Result<Vec<String>, (String, u32)> { let mut output: Vec<String> = input.iter().map(|x| x.to_string()).collect(); let mut definitions: HashMap<String, String> = HashMap::new(); let mut line_num = 0u32; //Find definitions for i in 0..output.len() { let mut current_line = &mut output[i]; let mut split: Vec<String> = current_line .split_whitespace() .map(|s| s.to_string()) .collect(); //Replace variable let mut new: Vec<String> = Vec::with_capacity(split.len()); for word in &split { if word.starts_with("$") { let to_check; if word.ends_with(",") { to_check = &word[1..word.len() - 1]; } else { to_check = &word[1..]; } match definitions.get(to_check) { Some(n) => new.push(n.clone()), None => return Err((format!("Unknown variable: {}", word), line_num)), } } else { new.push(word.clone()); } } let check_line = current_line.trim_left(); if check_line.starts_with("#") { //Preprocessor definition let mut words = check_line.split_whitespace(); match &words.next().unwrap()[1..] { "define" => { let name = words.next(); let mut definition = String::new(); words.for_each(|x| { definition += x; definition += " " }); definitions.insert(name.unwrap().to_string(), definition); } _ => { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )) } } if check_line[1..8] == *"define " { let end_name = check_line[9..].find(' ').unwrap() + 9; let name = String::from(&check_line[8..end_name]); let definition = String::from(&check_line[end_name..]); definitions.insert(name, definition); } else { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )); } } //Remove preprocessor lines current_line.clear(); if new.len() > 0 { if!new[0].starts_with("#") { for word in new { current_line.push_str(&word); current_line.push_str(" "); } } } line_num += 1; } //Sanitize let mut converted: Vec<String> = Vec::new(); for i in output { let mut line = i.to_string();
} Ok(converted) }
sanitize_line(&mut line); if !line.is_empty() { converted.push(line); }
random_line_split
preprocessor.rs
use std::vec::Vec; use std::string::String; use std::collections::HashMap; //Returns a string with comments removed, with everything in lowercase pub fn
(input: &mut String) { while let Some(i) = input.find(',') { input.remove(i); } match input.find(';') { Some(i) => *input = input[0..i].to_string(), None => {} } *input = input.trim().to_string().to_lowercase(); } //Preprocessor should support #define VAL xyz pub fn preprocess(input: &Vec<&str>) -> Result<Vec<String>, (String, u32)> { let mut output: Vec<String> = input.iter().map(|x| x.to_string()).collect(); let mut definitions: HashMap<String, String> = HashMap::new(); let mut line_num = 0u32; //Find definitions for i in 0..output.len() { let mut current_line = &mut output[i]; let mut split: Vec<String> = current_line .split_whitespace() .map(|s| s.to_string()) .collect(); //Replace variable let mut new: Vec<String> = Vec::with_capacity(split.len()); for word in &split { if word.starts_with("$") { let to_check; if word.ends_with(",") { to_check = &word[1..word.len() - 1]; } else { to_check = &word[1..]; } match definitions.get(to_check) { Some(n) => new.push(n.clone()), None => return Err((format!("Unknown variable: {}", word), line_num)), } } else { new.push(word.clone()); } } let check_line = current_line.trim_left(); if check_line.starts_with("#") { //Preprocessor definition let mut words = check_line.split_whitespace(); match &words.next().unwrap()[1..] { "define" => { let name = words.next(); let mut definition = String::new(); words.for_each(|x| { definition += x; definition += " " }); definitions.insert(name.unwrap().to_string(), definition); } _ => { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )) } } if check_line[1..8] == *"define " { let end_name = check_line[9..].find(' ').unwrap() + 9; let name = String::from(&check_line[8..end_name]); let definition = String::from(&check_line[end_name..]); definitions.insert(name, definition); } else { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )); } } //Remove preprocessor lines current_line.clear(); if new.len() > 0 { if!new[0].starts_with("#") { for word in new { current_line.push_str(&word); current_line.push_str(" "); } } } line_num += 1; } //Sanitize let mut converted: Vec<String> = Vec::new(); for i in output { let mut line = i.to_string(); sanitize_line(&mut line); if!line.is_empty() { converted.push(line); } } Ok(converted) }
sanitize_line
identifier_name
preprocessor.rs
use std::vec::Vec; use std::string::String; use std::collections::HashMap; //Returns a string with comments removed, with everything in lowercase pub fn sanitize_line(input: &mut String) { while let Some(i) = input.find(',') { input.remove(i); } match input.find(';') { Some(i) => *input = input[0..i].to_string(), None => {} } *input = input.trim().to_string().to_lowercase(); } //Preprocessor should support #define VAL xyz pub fn preprocess(input: &Vec<&str>) -> Result<Vec<String>, (String, u32)> { let mut output: Vec<String> = input.iter().map(|x| x.to_string()).collect(); let mut definitions: HashMap<String, String> = HashMap::new(); let mut line_num = 0u32; //Find definitions for i in 0..output.len() { let mut current_line = &mut output[i]; let mut split: Vec<String> = current_line .split_whitespace() .map(|s| s.to_string()) .collect(); //Replace variable let mut new: Vec<String> = Vec::with_capacity(split.len()); for word in &split { if word.starts_with("$")
else { new.push(word.clone()); } } let check_line = current_line.trim_left(); if check_line.starts_with("#") { //Preprocessor definition let mut words = check_line.split_whitespace(); match &words.next().unwrap()[1..] { "define" => { let name = words.next(); let mut definition = String::new(); words.for_each(|x| { definition += x; definition += " " }); definitions.insert(name.unwrap().to_string(), definition); } _ => { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )) } } if check_line[1..8] == *"define " { let end_name = check_line[9..].find(' ').unwrap() + 9; let name = String::from(&check_line[8..end_name]); let definition = String::from(&check_line[end_name..]); definitions.insert(name, definition); } else { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )); } } //Remove preprocessor lines current_line.clear(); if new.len() > 0 { if!new[0].starts_with("#") { for word in new { current_line.push_str(&word); current_line.push_str(" "); } } } line_num += 1; } //Sanitize let mut converted: Vec<String> = Vec::new(); for i in output { let mut line = i.to_string(); sanitize_line(&mut line); if!line.is_empty() { converted.push(line); } } Ok(converted) }
{ let to_check; if word.ends_with(",") { to_check = &word[1..word.len() - 1]; } else { to_check = &word[1..]; } match definitions.get(to_check) { Some(n) => new.push(n.clone()), None => return Err((format!("Unknown variable: {}", word), line_num)), } }
conditional_block
preprocessor.rs
use std::vec::Vec; use std::string::String; use std::collections::HashMap; //Returns a string with comments removed, with everything in lowercase pub fn sanitize_line(input: &mut String)
//Preprocessor should support #define VAL xyz pub fn preprocess(input: &Vec<&str>) -> Result<Vec<String>, (String, u32)> { let mut output: Vec<String> = input.iter().map(|x| x.to_string()).collect(); let mut definitions: HashMap<String, String> = HashMap::new(); let mut line_num = 0u32; //Find definitions for i in 0..output.len() { let mut current_line = &mut output[i]; let mut split: Vec<String> = current_line .split_whitespace() .map(|s| s.to_string()) .collect(); //Replace variable let mut new: Vec<String> = Vec::with_capacity(split.len()); for word in &split { if word.starts_with("$") { let to_check; if word.ends_with(",") { to_check = &word[1..word.len() - 1]; } else { to_check = &word[1..]; } match definitions.get(to_check) { Some(n) => new.push(n.clone()), None => return Err((format!("Unknown variable: {}", word), line_num)), } } else { new.push(word.clone()); } } let check_line = current_line.trim_left(); if check_line.starts_with("#") { //Preprocessor definition let mut words = check_line.split_whitespace(); match &words.next().unwrap()[1..] { "define" => { let name = words.next(); let mut definition = String::new(); words.for_each(|x| { definition += x; definition += " " }); definitions.insert(name.unwrap().to_string(), definition); } _ => { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )) } } if check_line[1..8] == *"define " { let end_name = check_line[9..].find(' ').unwrap() + 9; let name = String::from(&check_line[8..end_name]); let definition = String::from(&check_line[end_name..]); definitions.insert(name, definition); } else { return Err(( format!("Invalid preprocessor directive: {}", output[i]), line_num, )); } } //Remove preprocessor lines current_line.clear(); if new.len() > 0 { if!new[0].starts_with("#") { for word in new { current_line.push_str(&word); current_line.push_str(" "); } } } line_num += 1; } //Sanitize let mut converted: Vec<String> = Vec::new(); for i in output { let mut line = i.to_string(); sanitize_line(&mut line); if!line.is_empty() { converted.push(line); } } Ok(converted) }
{ while let Some(i) = input.find(',') { input.remove(i); } match input.find(';') { Some(i) => *input = input[0..i].to_string(), None => {} } *input = input.trim().to_string().to_lowercase(); }
identifier_body
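The four preprocessor.rs records above are splits of the same file; its sanitize_line strips commas, truncates at the ';' comment marker, trims, and lowercases. The standalone, slightly simplified copy below shows a worked input/output pair (the deprecated trim_left from the record is replaced by trim, which covers both ends); it is an illustration, not one of the records.

// Standalone sketch of what sanitize_line in the records above does:
// drop commas, cut everything after a ';' comment, trim, and lowercase.
fn sanitize_line(input: &mut String) {
    while let Some(i) = input.find(',') {
        input.remove(i);
    }
    if let Some(i) = input.find(';') {
        *input = input[..i].to_string();
    }
    *input = input.trim().to_lowercase();
}

fn main() {
    let mut line = String::from("  MOV R1, R2   ; copy the register");
    sanitize_line(&mut line);
    assert_eq!(line, "mov r1 r2");
}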
restrictederrorinfo.rs
// Copyright © 2017 winapi-rs developers // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // All files in the project carrying such notice may not be copied, modified, or distributed
interface IRestrictedErrorInfo(IRestrictedErrorInfoVtbl): IUnknown(IUnknownVtbl) { fn GetErrorDetails( description: *mut ::BSTR, error: *mut ::HRESULT, restrictedDescription: *mut ::BSTR, capabilitySid: *mut ::BSTR, ) -> ::HRESULT, fn GetReference( reference: *mut ::BSTR, ) -> ::HRESULT, } );
// except according to those terms. RIDL!( #[uuid(0x82ba7092, 0x4c88, 0x427d, 0xa7, 0xbc, 0x16, 0xdd, 0x93, 0xfe, 0xb6, 0x7e)]
random_line_split
std.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! # The Rust standard library The Rust standard library is a group of interrelated modules defining the core language traits, operations on built-in data types, collections, platform abstractions, the task scheduler, runtime support for language features and other common functionality. `std` includes modules corresponding to each of the integer types, each of the floating point types, the `bool` type, tuples, characters, strings (`str`), vectors (`vec`), managed boxes (`managed`), owned boxes (`owned`), and unsafe and borrowed pointers (`ptr`, `borrowed`). Additionally, `std` provides pervasive types (`option` and `result`), task creation and communication primitives (`task`, `comm`), platform abstractions (`os` and `path`), basic I/O abstractions (`io`), common traits (`kinds`, `ops`, `cmp`, `num`, `to_str`), and complete bindings to the C standard library (`libc`). # Standard library injection and the Rust prelude `std` is imported at the topmost level of every crate by default, as if the first line of each crate was extern mod std; This means that the contents of std can be accessed from any context with the `std::` path prefix, as in `use std::vec`, `use std::task::spawn`, etc. Additionally, `std` contains a `prelude` module that reexports many of the most common types, traits and functions. The contents of the prelude are imported into every *module* by default. Implicitly, all modules behave as if they contained the following prologue: use std::prelude::*; */ #[link(name = "std", vers = "0.7", uuid = "c70c24a7-5551-4f73-8e37-380b11d80be8", url = "https://github.com/mozilla/rust/tree/master/src/libstd")]; #[comment = "The Rust standard library"]; #[license = "MIT/ASL2"]; #[crate_type = "lib"]; // Don't link to std. We are std. #[no_std]; #[deny(non_camel_case_types)]; #[deny(missing_doc)]; // Make std testable by not duplicating lang items. See #2912 #[cfg(test)] extern mod realstd(name = "std"); #[cfg(test)] pub use kinds = realstd::kinds; #[cfg(test)] pub use ops = realstd::ops; #[cfg(test)] pub use cmp = realstd::cmp; // On Linux, link to the runtime with -lrt. #[cfg(target_os = "linux")] #[doc(hidden)] pub mod linkhack { #[link_args="-lrustrt -lrt"] #[link_args = "-lpthread"] extern { } } // Internal macros mod macros; /* The Prelude. 
*/ pub mod prelude; /* Primitive types */ #[path = "num/int_macros.rs"] mod int_macros; #[path = "num/uint_macros.rs"] mod uint_macros; #[path = "num/int.rs"] pub mod int; #[path = "num/i8.rs"] pub mod i8; #[path = "num/i16.rs"] pub mod i16; #[path = "num/i32.rs"] pub mod i32; #[path = "num/i64.rs"] pub mod i64; #[path = "num/uint.rs"] pub mod uint; #[path = "num/u8.rs"] pub mod u8; #[path = "num/u16.rs"] pub mod u16; #[path = "num/u32.rs"] pub mod u32; #[path = "num/u64.rs"] pub mod u64; #[path = "num/float.rs"] pub mod float; #[path = "num/f32.rs"] pub mod f32; #[path = "num/f64.rs"] pub mod f64; pub mod nil; pub mod bool; pub mod char; pub mod tuple; pub mod vec; pub mod at_vec; pub mod str; #[path = "str/ascii.rs"] pub mod ascii; pub mod ptr; pub mod owned; pub mod managed; pub mod borrow; /* Core language traits */ #[cfg(not(test))] pub mod kinds; #[cfg(not(test))] pub mod ops; #[cfg(not(test))] pub mod cmp; /* Common traits */ pub mod from_str; #[path = "num/num.rs"] pub mod num; pub mod iter; pub mod iterator; pub mod to_str; pub mod to_bytes; pub mod clone; pub mod io; pub mod hash; pub mod container; /* Common data structures */ pub mod option; pub mod result; pub mod either; pub mod hashmap; pub mod cell; pub mod trie; /* Tasks and communication */ #[path = "task/mod.rs"] pub mod task; pub mod comm; pub mod pipes; pub mod local_data; /* Runtime and platform support */ pub mod gc; pub mod libc; pub mod os; pub mod path; pub mod rand; pub mod run; pub mod sys; pub mod cast; pub mod repr; pub mod cleanup; pub mod reflect; pub mod condition; pub mod logging; pub mod util; /* Unsupported interfaces */ // Private APIs #[path = "unstable/mod.rs"] pub mod unstable; /* For internal use, not exported */ mod unicode; #[path = "num/cmath.rs"] mod cmath; mod stackwalk; // XXX: This shouldn't be pub, and it should be reexported under 'unstable' // but name resolution doesn't work without it being pub. #[path = "rt/mod.rs"] pub mod rt; // A curious inner-module that's not exported that contains the binding //'std' so that macro-expanded references to std::error and such // can be resolved within libstd. #[doc(hidden)] mod core { pub use clone; pub use cmp; pub use condition; pub use option; pub use kinds; pub use sys; pub use pipes; } #[doc(hidden)] mod std { pub use clone; pub use cmp; pub use condition; pub use option; pub use kinds; pub use sys; pub use pipes; pub use unstable; pub use str; pub use os; }
// file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
random_line_split
traits.rs
// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed `HashDB` implementation. use common::*; use hashdb::*; use kvdb::{Database, DBTransaction}; /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. pub trait JournalDB: HashDB { /// Return a copy of ourself, in a box. fn boxed_clone(&self) -> Box<JournalDB>; /// Returns heap memory size used fn mem_used(&self) -> usize; /// Returns the size of journalled state in memory. /// This function has a considerable speed requirement -- /// it must be fast enough to call several times per block imported. fn journal_size(&self) -> usize { 0 } /// Check if this database has any commits fn is_empty(&self) -> bool; /// Get the earliest era in the DB. None if there isn't yet any data in there. fn earliest_era(&self) -> Option<u64> { None } /// Get the latest era in the DB. None if there isn't yet any data in there. fn latest_era(&self) -> Option<u64>; /// Journal recent database operations as being associated with a given era and id. // TODO: give the overlay to this function so journaldbs don't manage the overlays themeselves. fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError>; /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> Result<u32, UtilError>; /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. /// /// Any keys or values inserted or deleted must be completely independent of those affected /// by any previous `commit` operations. Essentially, this means that `inject` can be used /// either to restore a state to a fresh database, or to insert data which may only be journalled /// from this point onwards. fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError>; /// State data query fn state(&self, _id: &H256) -> Option<Bytes>; /// Whether this database is pruned. fn is_pruned(&self) -> bool { true } /// Get backing database. fn backing(&self) -> &Arc<Database>; /// Clear internal strucutres. This should called after changes have been written /// to the backing strage fn flush(&self) {} /// Consolidate all the insertions and deletions in the given memory overlay. fn consolidate(&mut self, overlay: ::memorydb::MemoryDB); /// Commit all changes in a single batch #[cfg(test)] fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError>
/// Inject all changes in a single batch. #[cfg(test)] fn inject_batch(&mut self) -> Result<u32, UtilError> { let mut batch = self.backing().transaction(); let res = self.inject(&mut batch)?; self.backing().write(batch).map(|_| res).map_err(Into::into) } }
{ let mut batch = self.backing().transaction(); let mut ops = self.journal_under(&mut batch, now, id)?; if let Some((end_era, canon_id)) = end { ops += self.mark_canonical(&mut batch, end_era, &canon_id)?; } let result = self.backing().write(batch).map(|_| ops).map_err(Into::into); self.flush(); result }
identifier_body
traits.rs
// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed `HashDB` implementation. use common::*; use hashdb::*; use kvdb::{Database, DBTransaction}; /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. pub trait JournalDB: HashDB { /// Return a copy of ourself, in a box. fn boxed_clone(&self) -> Box<JournalDB>; /// Returns heap memory size used fn mem_used(&self) -> usize; /// Returns the size of journalled state in memory. /// This function has a considerable speed requirement -- /// it must be fast enough to call several times per block imported. fn journal_size(&self) -> usize { 0 } /// Check if this database has any commits fn is_empty(&self) -> bool; /// Get the earliest era in the DB. None if there isn't yet any data in there. fn earliest_era(&self) -> Option<u64> { None } /// Get the latest era in the DB. None if there isn't yet any data in there. fn latest_era(&self) -> Option<u64>; /// Journal recent database operations as being associated with a given era and id. // TODO: give the overlay to this function so journaldbs don't manage the overlays themeselves. fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError>; /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> Result<u32, UtilError>; /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. /// /// Any keys or values inserted or deleted must be completely independent of those affected /// by any previous `commit` operations. Essentially, this means that `inject` can be used /// either to restore a state to a fresh database, or to insert data which may only be journalled /// from this point onwards. fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError>; /// State data query fn state(&self, _id: &H256) -> Option<Bytes>; /// Whether this database is pruned. fn is_pruned(&self) -> bool { true } /// Get backing database. fn backing(&self) -> &Arc<Database>;
fn consolidate(&mut self, overlay: ::memorydb::MemoryDB); /// Commit all changes in a single batch #[cfg(test)] fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { let mut batch = self.backing().transaction(); let mut ops = self.journal_under(&mut batch, now, id)?; if let Some((end_era, canon_id)) = end { ops += self.mark_canonical(&mut batch, end_era, &canon_id)?; } let result = self.backing().write(batch).map(|_| ops).map_err(Into::into); self.flush(); result } /// Inject all changes in a single batch. #[cfg(test)] fn inject_batch(&mut self) -> Result<u32, UtilError> { let mut batch = self.backing().transaction(); let res = self.inject(&mut batch)?; self.backing().write(batch).map(|_| res).map_err(Into::into) } }
/// Clear internal strucutres. This should called after changes have been written /// to the backing strage fn flush(&self) {} /// Consolidate all the insertions and deletions in the given memory overlay.
random_line_split
traits.rs
// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed `HashDB` implementation. use common::*; use hashdb::*; use kvdb::{Database, DBTransaction}; /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. pub trait JournalDB: HashDB { /// Return a copy of ourself, in a box. fn boxed_clone(&self) -> Box<JournalDB>; /// Returns heap memory size used fn mem_used(&self) -> usize; /// Returns the size of journalled state in memory. /// This function has a considerable speed requirement -- /// it must be fast enough to call several times per block imported. fn journal_size(&self) -> usize { 0 } /// Check if this database has any commits fn is_empty(&self) -> bool; /// Get the earliest era in the DB. None if there isn't yet any data in there. fn earliest_era(&self) -> Option<u64> { None } /// Get the latest era in the DB. None if there isn't yet any data in there. fn latest_era(&self) -> Option<u64>; /// Journal recent database operations as being associated with a given era and id. // TODO: give the overlay to this function so journaldbs don't manage the overlays themeselves. fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError>; /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> Result<u32, UtilError>; /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. /// /// Any keys or values inserted or deleted must be completely independent of those affected /// by any previous `commit` operations. Essentially, this means that `inject` can be used /// either to restore a state to a fresh database, or to insert data which may only be journalled /// from this point onwards. fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError>; /// State data query fn state(&self, _id: &H256) -> Option<Bytes>; /// Whether this database is pruned. fn is_pruned(&self) -> bool { true } /// Get backing database. fn backing(&self) -> &Arc<Database>; /// Clear internal strucutres. This should called after changes have been written /// to the backing strage fn flush(&self) {} /// Consolidate all the insertions and deletions in the given memory overlay. fn consolidate(&mut self, overlay: ::memorydb::MemoryDB); /// Commit all changes in a single batch #[cfg(test)] fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { let mut batch = self.backing().transaction(); let mut ops = self.journal_under(&mut batch, now, id)?; if let Some((end_era, canon_id)) = end
let result = self.backing().write(batch).map(|_| ops).map_err(Into::into); self.flush(); result } /// Inject all changes in a single batch. #[cfg(test)] fn inject_batch(&mut self) -> Result<u32, UtilError> { let mut batch = self.backing().transaction(); let res = self.inject(&mut batch)?; self.backing().write(batch).map(|_| res).map_err(Into::into) } }
{ ops += self.mark_canonical(&mut batch, end_era, &canon_id)?; }
conditional_block
traits.rs
// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed `HashDB` implementation. use common::*; use hashdb::*; use kvdb::{Database, DBTransaction}; /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. pub trait JournalDB: HashDB { /// Return a copy of ourself, in a box. fn boxed_clone(&self) -> Box<JournalDB>; /// Returns heap memory size used fn mem_used(&self) -> usize; /// Returns the size of journalled state in memory. /// This function has a considerable speed requirement -- /// it must be fast enough to call several times per block imported. fn
(&self) -> usize { 0 } /// Check if this database has any commits fn is_empty(&self) -> bool; /// Get the earliest era in the DB. None if there isn't yet any data in there. fn earliest_era(&self) -> Option<u64> { None } /// Get the latest era in the DB. None if there isn't yet any data in there. fn latest_era(&self) -> Option<u64>; /// Journal recent database operations as being associated with a given era and id. // TODO: give the overlay to this function so journaldbs don't manage the overlays themeselves. fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError>; /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> Result<u32, UtilError>; /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. /// /// Any keys or values inserted or deleted must be completely independent of those affected /// by any previous `commit` operations. Essentially, this means that `inject` can be used /// either to restore a state to a fresh database, or to insert data which may only be journalled /// from this point onwards. fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError>; /// State data query fn state(&self, _id: &H256) -> Option<Bytes>; /// Whether this database is pruned. fn is_pruned(&self) -> bool { true } /// Get backing database. fn backing(&self) -> &Arc<Database>; /// Clear internal strucutres. This should called after changes have been written /// to the backing strage fn flush(&self) {} /// Consolidate all the insertions and deletions in the given memory overlay. fn consolidate(&mut self, overlay: ::memorydb::MemoryDB); /// Commit all changes in a single batch #[cfg(test)] fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { let mut batch = self.backing().transaction(); let mut ops = self.journal_under(&mut batch, now, id)?; if let Some((end_era, canon_id)) = end { ops += self.mark_canonical(&mut batch, end_era, &canon_id)?; } let result = self.backing().write(batch).map(|_| ops).map_err(Into::into); self.flush(); result } /// Inject all changes in a single batch. #[cfg(test)] fn inject_batch(&mut self) -> Result<u32, UtilError> { let mut batch = self.backing().transaction(); let res = self.inject(&mut batch)?; self.backing().write(batch).map(|_| res).map_err(Into::into) } }
journal_size
identifier_name
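The traits.rs records above all come from Parity's JournalDB trait, whose commit_batch default method composes the required primitives (journal_under, mark_canonical, and a write on the backing database) into one batched commit. Below is a much-reduced sketch of that trait-with-default-method shape; the types and names are illustrative stand-ins, not the Parity API.

// Simplified sketch of the pattern above: a trait whose default method
// (commit_batch) composes the required methods into one batched write.
struct Batch {
    ops: Vec<String>,
}

trait Journal {
    fn journal_under(&mut self, batch: &mut Batch, era: u64) -> u32;
    fn mark_canonical(&mut self, batch: &mut Batch, era: u64) -> u32;
    fn write(&mut self, batch: Batch);

    // Default method: implementors get batched commits for free once the
    // three primitives above exist.
    fn commit_batch(&mut self, era: u64, canonical: Option<u64>) -> u32 {
        let mut batch = Batch { ops: Vec::new() };
        let mut ops = self.journal_under(&mut batch, era);
        if let Some(canon_era) = canonical {
            ops += self.mark_canonical(&mut batch, canon_era);
        }
        self.write(batch);
        ops
    }
}

struct InMemory {
    committed: Vec<String>,
}

impl Journal for InMemory {
    fn journal_under(&mut self, batch: &mut Batch, era: u64) -> u32 {
        batch.ops.push(format!("journal era {era}"));
        1
    }
    fn mark_canonical(&mut self, batch: &mut Batch, era: u64) -> u32 {
        batch.ops.push(format!("canonical era {era}"));
        1
    }
    fn write(&mut self, batch: Batch) {
        self.committed.extend(batch.ops);
    }
}

fn main() {
    let mut db = InMemory { committed: Vec::new() };
    let ops = db.commit_batch(7, Some(5));
    assert_eq!(ops, 2);
    assert_eq!(db.committed.len(), 2);
}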
http_parts.rs
//! HTTP parts of a Request/Response interaction use std::collections::HashMap; use std::str::from_utf8; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::content_types::{ContentType, detect_content_type_from_string}; use crate::generators::{Generator, GeneratorCategory, Generators}; use crate::matchingrules::{Category, MatchingRules}; use crate::path_exp::DocPath; /// Trait to specify an HTTP part of an interaction. It encapsulates the shared parts of a request /// and response. pub trait HttpPart { /// Returns the headers of the HTTP part. fn headers(&self) -> &Option<HashMap<String, Vec<String>>>; /// Returns the headers of the HTTP part in a mutable form. fn headers_mut(&mut self) -> &mut HashMap<String, Vec<String>>; /// Returns the body of the HTTP part. fn body(&self) -> &OptionalBody; /// Returns a mutable pointer to the body of the HTTP part. fn body_mut(&mut self) -> &mut OptionalBody; /// Returns the matching rules of the HTTP part. fn matching_rules(&self) -> &MatchingRules; /// Returns the matching rules of the HTTP part. fn matching_rules_mut(&mut self) -> &mut MatchingRules; /// Returns the generators of the HTTP part. fn generators(&self) -> &Generators; /// Returns the generators of the HTTP part. fn generators_mut(&mut self) -> &mut Generators; /// Lookup up the content type for the part fn lookup_content_type(&self) -> Option<String>; /// Tries to detect the content type of the body by matching some regular expressions against /// the first 32 characters. fn detect_content_type(&self) -> Option<ContentType> { match *self.body() { OptionalBody::Present(ref body, _, _) => { let s: String = match from_utf8(body) { Ok(s) => s.to_string(), Err(_) => String::new() }; detect_content_type_from_string(&s) }, _ => None } } /// Determine the content type of the HTTP part. If a `Content-Type` header is present, the /// value of that header will be returned. Otherwise, the body will be inspected. 
fn content_type(&self) -> Option<ContentType> { let body = self.body(); if body.has_content_type() { body.content_type() } else { match self.lookup_content_type() { Some(ref h) => match ContentType::parse(h.as_str()) { Ok(v) => Some(v), Err(_) => self.detect_content_type() }, None => self.detect_content_type() } } } /// Checks if the HTTP Part has the given header fn has_header(&self, header_name: &str) -> bool { self.lookup_header_value(header_name).is_some() } /// Checks if the HTTP Part has the given header fn lookup_header_value(&self, header_name: &str) -> Option<String> { match *self.headers() { Some(ref h) => h.iter() .find(|kv| kv.0.to_lowercase() == header_name.to_lowercase()) .map(|kv| kv.1.clone().join(", ")), None => None } } /// If the body is a textual type (non-binary) fn has_text_body(&self) -> bool { let body = self.body(); let str_body = body.str_value(); body.is_present() &&!str_body.is_empty() && str_body.is_ascii() } /// Convenience method to add a header fn add_header(&mut self, key: &str, val: Vec<&str>) { let headers = self.headers_mut(); headers.insert(key.to_string(), val.iter().map(|v| v.to_string()).collect()); } /// Builds a map of generators from the generators and matching rules fn build_generators(&self, category: &GeneratorCategory) -> HashMap<DocPath, Generator> { let mut generators = hashmap!{}; if let Some(generators_for_category) = self.generators().categories.get(category) { for (path, generator) in generators_for_category { generators.insert(path.clone(), generator.clone()); } } let mr_category: Category = category.clone().into(); if let Some(rules) = self.matching_rules().rules_for_category(mr_category) { for (path, generator) in rules.generators() { generators.insert(path.clone(), generator.clone()); } } generators } } #[cfg(test)] mod tests { use expectest::prelude::*; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::http_parts::HttpPart; use crate::request::Request; #[test] fn
() { let request = Request { method: "GET".to_string(), path: "/".to_string(), query: None, headers: Some(hashmap!{ "Content-Type".to_string() => vec!["application/json; charset=UTF-8".to_string()] }), body: OptionalBody::Missing,.. Request::default() }; expect!(request.has_header("Content-Type")).to(be_true()); expect!(request.lookup_header_value("Content-Type")).to(be_some().value("application/json; charset=UTF-8")); } }
http_part_has_header_test
identifier_name
http_parts.rs
//! HTTP parts of a Request/Response interaction use std::collections::HashMap; use std::str::from_utf8; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::content_types::{ContentType, detect_content_type_from_string}; use crate::generators::{Generator, GeneratorCategory, Generators}; use crate::matchingrules::{Category, MatchingRules}; use crate::path_exp::DocPath; /// Trait to specify an HTTP part of an interaction. It encapsulates the shared parts of a request /// and response. pub trait HttpPart { /// Returns the headers of the HTTP part. fn headers(&self) -> &Option<HashMap<String, Vec<String>>>; /// Returns the headers of the HTTP part in a mutable form. fn headers_mut(&mut self) -> &mut HashMap<String, Vec<String>>; /// Returns the body of the HTTP part. fn body(&self) -> &OptionalBody; /// Returns a mutable pointer to the body of the HTTP part. fn body_mut(&mut self) -> &mut OptionalBody; /// Returns the matching rules of the HTTP part. fn matching_rules(&self) -> &MatchingRules; /// Returns the matching rules of the HTTP part. fn matching_rules_mut(&mut self) -> &mut MatchingRules; /// Returns the generators of the HTTP part. fn generators(&self) -> &Generators; /// Returns the generators of the HTTP part. fn generators_mut(&mut self) -> &mut Generators; /// Lookup up the content type for the part fn lookup_content_type(&self) -> Option<String>; /// Tries to detect the content type of the body by matching some regular expressions against /// the first 32 characters. fn detect_content_type(&self) -> Option<ContentType> { match *self.body() { OptionalBody::Present(ref body, _, _) => { let s: String = match from_utf8(body) { Ok(s) => s.to_string(), Err(_) => String::new() }; detect_content_type_from_string(&s) }, _ => None } } /// Determine the content type of the HTTP part. If a `Content-Type` header is present, the /// value of that header will be returned. Otherwise, the body will be inspected. fn content_type(&self) -> Option<ContentType> { let body = self.body(); if body.has_content_type()
else { match self.lookup_content_type() { Some(ref h) => match ContentType::parse(h.as_str()) { Ok(v) => Some(v), Err(_) => self.detect_content_type() }, None => self.detect_content_type() } } } /// Checks if the HTTP Part has the given header fn has_header(&self, header_name: &str) -> bool { self.lookup_header_value(header_name).is_some() } /// Checks if the HTTP Part has the given header fn lookup_header_value(&self, header_name: &str) -> Option<String> { match *self.headers() { Some(ref h) => h.iter() .find(|kv| kv.0.to_lowercase() == header_name.to_lowercase()) .map(|kv| kv.1.clone().join(", ")), None => None } } /// If the body is a textual type (non-binary) fn has_text_body(&self) -> bool { let body = self.body(); let str_body = body.str_value(); body.is_present() &&!str_body.is_empty() && str_body.is_ascii() } /// Convenience method to add a header fn add_header(&mut self, key: &str, val: Vec<&str>) { let headers = self.headers_mut(); headers.insert(key.to_string(), val.iter().map(|v| v.to_string()).collect()); } /// Builds a map of generators from the generators and matching rules fn build_generators(&self, category: &GeneratorCategory) -> HashMap<DocPath, Generator> { let mut generators = hashmap!{}; if let Some(generators_for_category) = self.generators().categories.get(category) { for (path, generator) in generators_for_category { generators.insert(path.clone(), generator.clone()); } } let mr_category: Category = category.clone().into(); if let Some(rules) = self.matching_rules().rules_for_category(mr_category) { for (path, generator) in rules.generators() { generators.insert(path.clone(), generator.clone()); } } generators } } #[cfg(test)] mod tests { use expectest::prelude::*; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::http_parts::HttpPart; use crate::request::Request; #[test] fn http_part_has_header_test() { let request = Request { method: "GET".to_string(), path: "/".to_string(), query: None, headers: Some(hashmap!{ "Content-Type".to_string() => vec!["application/json; charset=UTF-8".to_string()] }), body: OptionalBody::Missing,.. Request::default() }; expect!(request.has_header("Content-Type")).to(be_true()); expect!(request.lookup_header_value("Content-Type")).to(be_some().value("application/json; charset=UTF-8")); } }
{ body.content_type() }
conditional_block
http_parts.rs
//! HTTP parts of a Request/Response interaction use std::collections::HashMap; use std::str::from_utf8; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::content_types::{ContentType, detect_content_type_from_string}; use crate::generators::{Generator, GeneratorCategory, Generators}; use crate::matchingrules::{Category, MatchingRules}; use crate::path_exp::DocPath; /// Trait to specify an HTTP part of an interaction. It encapsulates the shared parts of a request /// and response. pub trait HttpPart { /// Returns the headers of the HTTP part. fn headers(&self) -> &Option<HashMap<String, Vec<String>>>; /// Returns the headers of the HTTP part in a mutable form. fn headers_mut(&mut self) -> &mut HashMap<String, Vec<String>>;
/// Returns a mutable pointer to the body of the HTTP part. fn body_mut(&mut self) -> &mut OptionalBody; /// Returns the matching rules of the HTTP part. fn matching_rules(&self) -> &MatchingRules; /// Returns the matching rules of the HTTP part. fn matching_rules_mut(&mut self) -> &mut MatchingRules; /// Returns the generators of the HTTP part. fn generators(&self) -> &Generators; /// Returns the generators of the HTTP part. fn generators_mut(&mut self) -> &mut Generators; /// Lookup up the content type for the part fn lookup_content_type(&self) -> Option<String>; /// Tries to detect the content type of the body by matching some regular expressions against /// the first 32 characters. fn detect_content_type(&self) -> Option<ContentType> { match *self.body() { OptionalBody::Present(ref body, _, _) => { let s: String = match from_utf8(body) { Ok(s) => s.to_string(), Err(_) => String::new() }; detect_content_type_from_string(&s) }, _ => None } } /// Determine the content type of the HTTP part. If a `Content-Type` header is present, the /// value of that header will be returned. Otherwise, the body will be inspected. fn content_type(&self) -> Option<ContentType> { let body = self.body(); if body.has_content_type() { body.content_type() } else { match self.lookup_content_type() { Some(ref h) => match ContentType::parse(h.as_str()) { Ok(v) => Some(v), Err(_) => self.detect_content_type() }, None => self.detect_content_type() } } } /// Checks if the HTTP Part has the given header fn has_header(&self, header_name: &str) -> bool { self.lookup_header_value(header_name).is_some() } /// Checks if the HTTP Part has the given header fn lookup_header_value(&self, header_name: &str) -> Option<String> { match *self.headers() { Some(ref h) => h.iter() .find(|kv| kv.0.to_lowercase() == header_name.to_lowercase()) .map(|kv| kv.1.clone().join(", ")), None => None } } /// If the body is a textual type (non-binary) fn has_text_body(&self) -> bool { let body = self.body(); let str_body = body.str_value(); body.is_present() &&!str_body.is_empty() && str_body.is_ascii() } /// Convenience method to add a header fn add_header(&mut self, key: &str, val: Vec<&str>) { let headers = self.headers_mut(); headers.insert(key.to_string(), val.iter().map(|v| v.to_string()).collect()); } /// Builds a map of generators from the generators and matching rules fn build_generators(&self, category: &GeneratorCategory) -> HashMap<DocPath, Generator> { let mut generators = hashmap!{}; if let Some(generators_for_category) = self.generators().categories.get(category) { for (path, generator) in generators_for_category { generators.insert(path.clone(), generator.clone()); } } let mr_category: Category = category.clone().into(); if let Some(rules) = self.matching_rules().rules_for_category(mr_category) { for (path, generator) in rules.generators() { generators.insert(path.clone(), generator.clone()); } } generators } } #[cfg(test)] mod tests { use expectest::prelude::*; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::http_parts::HttpPart; use crate::request::Request; #[test] fn http_part_has_header_test() { let request = Request { method: "GET".to_string(), path: "/".to_string(), query: None, headers: Some(hashmap!{ "Content-Type".to_string() => vec!["application/json; charset=UTF-8".to_string()] }), body: OptionalBody::Missing,.. Request::default() }; expect!(request.has_header("Content-Type")).to(be_true()); expect!(request.lookup_header_value("Content-Type")).to(be_some().value("application/json; charset=UTF-8")); } }
/// Returns the body of the HTTP part. fn body(&self) -> &OptionalBody;
random_line_split
http_parts.rs
//! HTTP parts of a Request/Response interaction use std::collections::HashMap; use std::str::from_utf8; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::content_types::{ContentType, detect_content_type_from_string}; use crate::generators::{Generator, GeneratorCategory, Generators}; use crate::matchingrules::{Category, MatchingRules}; use crate::path_exp::DocPath; /// Trait to specify an HTTP part of an interaction. It encapsulates the shared parts of a request /// and response. pub trait HttpPart { /// Returns the headers of the HTTP part. fn headers(&self) -> &Option<HashMap<String, Vec<String>>>; /// Returns the headers of the HTTP part in a mutable form. fn headers_mut(&mut self) -> &mut HashMap<String, Vec<String>>; /// Returns the body of the HTTP part. fn body(&self) -> &OptionalBody; /// Returns a mutable pointer to the body of the HTTP part. fn body_mut(&mut self) -> &mut OptionalBody; /// Returns the matching rules of the HTTP part. fn matching_rules(&self) -> &MatchingRules; /// Returns the matching rules of the HTTP part. fn matching_rules_mut(&mut self) -> &mut MatchingRules; /// Returns the generators of the HTTP part. fn generators(&self) -> &Generators; /// Returns the generators of the HTTP part. fn generators_mut(&mut self) -> &mut Generators; /// Lookup up the content type for the part fn lookup_content_type(&self) -> Option<String>; /// Tries to detect the content type of the body by matching some regular expressions against /// the first 32 characters. fn detect_content_type(&self) -> Option<ContentType> { match *self.body() { OptionalBody::Present(ref body, _, _) => { let s: String = match from_utf8(body) { Ok(s) => s.to_string(), Err(_) => String::new() }; detect_content_type_from_string(&s) }, _ => None } } /// Determine the content type of the HTTP part. If a `Content-Type` header is present, the /// value of that header will be returned. Otherwise, the body will be inspected. fn content_type(&self) -> Option<ContentType> { let body = self.body(); if body.has_content_type() { body.content_type() } else { match self.lookup_content_type() { Some(ref h) => match ContentType::parse(h.as_str()) { Ok(v) => Some(v), Err(_) => self.detect_content_type() }, None => self.detect_content_type() } } } /// Checks if the HTTP Part has the given header fn has_header(&self, header_name: &str) -> bool
/// Checks if the HTTP Part has the given header fn lookup_header_value(&self, header_name: &str) -> Option<String> { match *self.headers() { Some(ref h) => h.iter() .find(|kv| kv.0.to_lowercase() == header_name.to_lowercase()) .map(|kv| kv.1.clone().join(", ")), None => None } } /// If the body is a textual type (non-binary) fn has_text_body(&self) -> bool { let body = self.body(); let str_body = body.str_value(); body.is_present() &&!str_body.is_empty() && str_body.is_ascii() } /// Convenience method to add a header fn add_header(&mut self, key: &str, val: Vec<&str>) { let headers = self.headers_mut(); headers.insert(key.to_string(), val.iter().map(|v| v.to_string()).collect()); } /// Builds a map of generators from the generators and matching rules fn build_generators(&self, category: &GeneratorCategory) -> HashMap<DocPath, Generator> { let mut generators = hashmap!{}; if let Some(generators_for_category) = self.generators().categories.get(category) { for (path, generator) in generators_for_category { generators.insert(path.clone(), generator.clone()); } } let mr_category: Category = category.clone().into(); if let Some(rules) = self.matching_rules().rules_for_category(mr_category) { for (path, generator) in rules.generators() { generators.insert(path.clone(), generator.clone()); } } generators } } #[cfg(test)] mod tests { use expectest::prelude::*; use maplit::hashmap; use crate::bodies::OptionalBody; use crate::http_parts::HttpPart; use crate::request::Request; #[test] fn http_part_has_header_test() { let request = Request { method: "GET".to_string(), path: "/".to_string(), query: None, headers: Some(hashmap!{ "Content-Type".to_string() => vec!["application/json; charset=UTF-8".to_string()] }), body: OptionalBody::Missing,.. Request::default() }; expect!(request.has_header("Content-Type")).to(be_true()); expect!(request.lookup_header_value("Content-Type")).to(be_some().value("application/json; charset=UTF-8")); } }
{ self.lookup_header_value(header_name).is_some() }
identifier_body
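The `content_type` method above resolves the part's content type by falling back through three sources in order: a type attached to the body itself, a parseable `Content-Type` header, and finally detection from the body text. Below is a minimal, self-contained sketch of that fallback order; it does not use the pact crates, and the parse/detect rules are simplified stand-ins for `ContentType::parse` and `detect_content_type_from_string`, not the real implementations.

// Hypothetical stand-alone sketch of the header/body fallback order shown above.
fn resolve_content_type(
    body_content_type: Option<&str>,
    header_value: Option<&str>,
    body_text: &str,
) -> Option<String> {
    // 1. A content type carried by the body itself wins.
    if let Some(ct) = body_content_type {
        return Some(ct.to_string());
    }
    // 2. Otherwise try the Content-Type header (stand-in for ContentType::parse).
    if let Some(h) = header_value {
        if h.contains('/') {
            return Some(h.to_string());
        }
    }
    // 3. Finally, guess from the body text (stand-in for detect_content_type_from_string).
    let trimmed = body_text.trim_start();
    if trimmed.starts_with('{') || trimmed.starts_with('[') {
        Some("application/json".to_string())
    } else if trimmed.starts_with('<') {
        Some("application/xml".to_string())
    } else {
        None
    }
}

fn main() {
    assert_eq!(
        resolve_content_type(None, Some("text/plain"), "hello"),
        Some("text/plain".to_string())
    );
    assert_eq!(
        resolve_content_type(None, None, r#"{"a": 1}"#),
        Some("application/json".to_string())
    );
}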
istr.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn test_stack_assign() { let s: ~str = ~"a"; println!("{}", s.clone()); let t: ~str = ~"a"; assert!(s == t); let u: ~str = ~"b"; assert!((s != u)); } fn test_heap_lit() { ~"a big string"; } fn
() { let s: ~str = ~"a big ol' string"; let t: ~str = ~"a big ol' string"; assert!(s == t); let u: ~str = ~"a bad ol' string"; assert!((s!= u)); } fn test_heap_log() { let s = ~"a big ol' string"; println!("{}", s); } fn test_stack_add() { assert_eq!(~"a" + "b", ~"ab"); let s: ~str = ~"a"; assert_eq!(s + s, ~"aa"); assert_eq!(~"" + "", ~""); } fn test_stack_heap_add() { assert!((~"a" + "bracadabra" == ~"abracadabra")); } fn test_heap_add() { assert_eq!(~"this should" + " totally work", ~"this should totally work"); } fn test_append() { let mut s = ~""; s.push_str("a"); assert_eq!(s, ~"a"); let mut s = ~"a"; s.push_str("b"); println!("{}", s.clone()); assert_eq!(s, ~"ab"); let mut s = ~"c"; s.push_str("offee"); assert!(s == ~"coffee"); s.push_str("&tea"); assert!(s == ~"coffee&tea"); } pub fn main() { test_stack_assign(); test_heap_lit(); test_heap_assign(); test_heap_log(); test_stack_add(); test_stack_heap_add(); test_heap_add(); test_append(); }
test_heap_assign
identifier_name
istr.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn test_stack_assign() { let s: ~str = ~"a"; println!("{}", s.clone()); let t: ~str = ~"a"; assert!(s == t); let u: ~str = ~"b"; assert!((s != u)); } fn test_heap_lit() { ~"a big string"; } fn test_heap_assign() { let s: ~str = ~"a big ol' string"; let t: ~str = ~"a big ol' string"; assert!(s == t); let u: ~str = ~"a bad ol' string"; assert!((s != u)); } fn test_heap_log() { let s = ~"a big ol' string"; println!("{}", s); } fn test_stack_add() { assert_eq!(~"a" + "b", ~"ab"); let s: ~str = ~"a"; assert_eq!(s + s, ~"aa"); assert_eq!(~"" + "", ~""); } fn test_stack_heap_add() { assert!((~"a" + "bracadabra" == ~"abracadabra")); } fn test_heap_add() { assert_eq!(~"this should" + " totally work", ~"this should totally work"); } fn test_append() { let mut s = ~""; s.push_str("a"); assert_eq!(s, ~"a"); let mut s = ~"a"; s.push_str("b"); println!("{}", s.clone()); assert_eq!(s, ~"ab"); let mut s = ~"c"; s.push_str("offee"); assert!(s == ~"coffee"); s.push_str("&tea"); assert!(s == ~"coffee&tea"); } pub fn main() { test_stack_assign(); test_heap_lit();
test_stack_add(); test_stack_heap_add(); test_heap_add(); test_append(); }
test_heap_assign(); test_heap_log();
random_line_split
istr.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn test_stack_assign() { let s: ~str = ~"a"; println!("{}", s.clone()); let t: ~str = ~"a"; assert!(s == t); let u: ~str = ~"b"; assert!((s != u)); } fn test_heap_lit() { ~"a big string"; } fn test_heap_assign()
fn test_heap_log() { let s = ~"a big ol' string"; println!("{}", s); } fn test_stack_add() { assert_eq!(~"a" + "b", ~"ab"); let s: ~str = ~"a"; assert_eq!(s + s, ~"aa"); assert_eq!(~"" + "", ~""); } fn test_stack_heap_add() { assert!((~"a" + "bracadabra" == ~"abracadabra")); } fn test_heap_add() { assert_eq!(~"this should" + " totally work", ~"this should totally work"); } fn test_append() { let mut s = ~""; s.push_str("a"); assert_eq!(s, ~"a"); let mut s = ~"a"; s.push_str("b"); println!("{}", s.clone()); assert_eq!(s, ~"ab"); let mut s = ~"c"; s.push_str("offee"); assert!(s == ~"coffee"); s.push_str("&tea"); assert!(s == ~"coffee&tea"); } pub fn main() { test_stack_assign(); test_heap_lit(); test_heap_assign(); test_heap_log(); test_stack_add(); test_stack_heap_add(); test_heap_add(); test_append(); }
{ let s: ~str = ~"a big ol' string"; let t: ~str = ~"a big ol' string"; assert!(s == t); let u: ~str = ~"a bad ol' string"; assert!((s != u)); }
identifier_body
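The istr.rs rows above are written in pre-1.0 Rust: `~str` is the old owned string type and `~"..."` its literal form. For orientation, here is a hedged sketch of the same assertions in current Rust, with `String` standing in for `~str`; it is an illustration of the semantics, not part of the original test.

// The same string checks in current Rust, where `~str` corresponds to `String`.
fn main() {
    let s = String::from("a");
    let t = String::from("a");
    assert!(s == t);
    let u = String::from("b");
    assert!(s != u);

    // Concatenation consumes the left-hand String and borrows the right &str.
    assert_eq!(String::from("a") + "b", "ab");

    let mut c = String::from("c");
    c.push_str("offee");
    assert_eq!(c, "coffee");
    c.push_str("&tea");
    assert_eq!(c, "coffee&tea");
}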
associated-types-conditional-dispatch.rs
// run-pass // Test that we evaluate projection predicates to winnow out // candidates during trait selection and method resolution (#20296). // If we don't properly winnow out candidates based on the output type // `Target=[A]`, then the impl marked with `(*)` is seen to conflict // with all the others. // pretty-expanded FIXME #23616 use std::marker::PhantomData; use std::ops::Deref; pub trait MyEq<U:?Sized=Self> { fn eq(&self, u: &U) -> bool; } impl<A, B> MyEq<[B]> for [A] where A : MyEq<B> { fn eq(&self, other: &[B]) -> bool { self.len() == other.len() && self.iter().zip(other).all(|(a, b)| MyEq::eq(a, b)) } } // (*) This impl conflicts with everything unless the `Target=[A]` // constraint is considered. impl<'a, A, B, Lhs> MyEq<[B; 0]> for Lhs where A: MyEq<B>, Lhs: Deref<Target=[A]> { fn eq(&self, other: &[B; 0]) -> bool { MyEq::eq(&**self, other) } } struct DerefWithHelper<H, T> { pub helper: H, pub marker: PhantomData<T>, } trait Helper<T> { fn helper_borrow(&self) -> &T; } impl<T> Helper<T> for Option<T> { fn
(&self) -> &T { self.as_ref().unwrap() } } impl<T, H: Helper<T>> Deref for DerefWithHelper<H, T> { type Target = T; fn deref(&self) -> &T { self.helper.helper_borrow() } } pub fn check<T: MyEq>(x: T, y: T) -> bool { let d: DerefWithHelper<Option<T>, T> = DerefWithHelper { helper: Some(x), marker: PhantomData }; d.eq(&y) } pub fn main() { }
helper_borrow
identifier_name
associated-types-conditional-dispatch.rs
// run-pass // Test that we evaluate projection predicates to winnow out // candidates during trait selection and method resolution (#20296). // If we don't properly winnow out candidates based on the output type // `Target=[A]`, then the impl marked with `(*)` is seen to conflict // with all the others. // pretty-expanded FIXME #23616 use std::marker::PhantomData; use std::ops::Deref; pub trait MyEq<U:?Sized=Self> { fn eq(&self, u: &U) -> bool; } impl<A, B> MyEq<[B]> for [A] where A : MyEq<B> { fn eq(&self, other: &[B]) -> bool { self.len() == other.len() && self.iter().zip(other).all(|(a, b)| MyEq::eq(a, b)) } } // (*) This impl conflicts with everything unless the `Target=[A]` // constraint is considered. impl<'a, A, B, Lhs> MyEq<[B; 0]> for Lhs where A: MyEq<B>, Lhs: Deref<Target=[A]> { fn eq(&self, other: &[B; 0]) -> bool { MyEq::eq(&**self, other) } } struct DerefWithHelper<H, T> { pub helper: H, pub marker: PhantomData<T>, } trait Helper<T> { fn helper_borrow(&self) -> &T; } impl<T> Helper<T> for Option<T> { fn helper_borrow(&self) -> &T { self.as_ref().unwrap() } } impl<T, H: Helper<T>> Deref for DerefWithHelper<H, T> { type Target = T; fn deref(&self) -> &T { self.helper.helper_borrow() } } pub fn check<T: MyEq>(x: T, y: T) -> bool { let d: DerefWithHelper<Option<T>, T> = DerefWithHelper { helper: Some(x), marker: PhantomData }; d.eq(&y) } pub fn main()
{ }
identifier_body
associated-types-conditional-dispatch.rs
// run-pass // Test that we evaluate projection predicates to winnow out // candidates during trait selection and method resolution (#20296). // If we don't properly winnow out candidates based on the output type // `Target=[A]`, then the impl marked with `(*)` is seen to conflict // with all the others. // pretty-expanded FIXME #23616 use std::marker::PhantomData; use std::ops::Deref; pub trait MyEq<U:?Sized=Self> { fn eq(&self, u: &U) -> bool; } impl<A, B> MyEq<[B]> for [A] where A : MyEq<B> { fn eq(&self, other: &[B]) -> bool { self.len() == other.len() && self.iter().zip(other).all(|(a, b)| MyEq::eq(a, b)) } } // (*) This impl conflicts with everything unless the `Target=[A]` // constraint is considered. impl<'a, A, B, Lhs> MyEq<[B; 0]> for Lhs where A: MyEq<B>, Lhs: Deref<Target=[A]> { fn eq(&self, other: &[B; 0]) -> bool { MyEq::eq(&**self, other) } } struct DerefWithHelper<H, T> { pub helper: H, pub marker: PhantomData<T>, }
fn helper_borrow(&self) -> &T { self.as_ref().unwrap() } } impl<T, H: Helper<T>> Deref for DerefWithHelper<H, T> { type Target = T; fn deref(&self) -> &T { self.helper.helper_borrow() } } pub fn check<T: MyEq>(x: T, y: T) -> bool { let d: DerefWithHelper<Option<T>, T> = DerefWithHelper { helper: Some(x), marker: PhantomData }; d.eq(&y) } pub fn main() { }
trait Helper<T> { fn helper_borrow(&self) -> &T; } impl<T> Helper<T> for Option<T> {
random_line_split
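The header comment in the test above explains that the impl marked `(*)` only stops conflicting with the other impls once the `Target=[A]` projection bound is taken into account when winnowing candidates. The following is a modernized, self-contained sketch of that idea rather than the original test: the method is renamed to `my_eq`, and the array length and the `i32` impl are illustrative choices.

use std::ops::Deref;

// Renamed to my_eq so it cannot be confused with PartialEq::eq.
trait MyEq<U: ?Sized = Self> {
    fn my_eq(&self, u: &U) -> bool;
}

impl MyEq for i32 {
    fn my_eq(&self, u: &i32) -> bool {
        self == u
    }
}

// Element-wise comparison of slices.
impl<A, B> MyEq<[B]> for [A]
where
    A: MyEq<B>,
{
    fn my_eq(&self, other: &[B]) -> bool {
        self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a.my_eq(b))
    }
}

// This impl names *any* Lhs, but only applies when Lhs derefs to a slice of
// comparable elements; without considering the `Target = [A]` projection it
// would be a candidate for far more receiver types.
impl<A, B, Lhs> MyEq<[B; 3]> for Lhs
where
    A: MyEq<B>,
    Lhs: Deref<Target = [A]>,
{
    fn my_eq(&self, other: &[B; 3]) -> bool {
        self.deref().my_eq(&other[..])
    }
}

fn main() {
    let v: Vec<i32> = vec![1, 2, 3];
    // Resolves through the Deref-based impl because Vec<i32> derefs to [i32].
    assert!(v.my_eq(&[1, 2, 3]));
}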
opts.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Configuration options for a single run of the servo application. Created //! from command line arguments. use geometry::ScreenPx; use geom::scale_factor::ScaleFactor; use geom::size::TypedSize2D; use layers::geometry::DevicePixel; use getopts; use std::collections::HashSet; use std::cmp; use std::old_io as io; use std::mem; use std::os; use std::ptr; use std::rt; /// Global flags for Servo, currently set on the command line. #[derive(Clone)] pub struct
{ /// The initial URLs to load. pub urls: Vec<String>, /// How many threads to use for CPU painting (`-t`). /// /// FIXME(pcwalton): This is not currently used. All painting is sequential. pub n_paint_threads: uint, /// True to use GPU painting via Skia-GL, false to use CPU painting via Skia (`-g`). Note that /// compositing is always done on the GPU. pub gpu_painting: bool, /// The maximum size of each tile in pixels (`-s`). pub tile_size: uint, /// The ratio of device pixels per px at the default scale. If unspecified, will use the /// platform default setting. pub device_pixels_per_px: Option<ScaleFactor<ScreenPx, DevicePixel, f32>>, /// `None` to disable the time profiler or `Some` with an interval in seconds to enable it and /// cause it to produce output on that interval (`-p`). pub time_profiler_period: Option<f64>, /// `None` to disable the memory profiler or `Some` with an interval in seconds to enable it /// and cause it to produce output on that interval (`-m`). pub memory_profiler_period: Option<f64>, /// Enable experimental web features (`-e`). pub enable_experimental: bool, /// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive /// sequential algorithm. pub layout_threads: uint, pub nonincremental_layout: bool, pub output_file: Option<String>, pub headless: bool, pub hard_fail: bool, /// True if we should bubble intrinsic widths sequentially (`-b`). If this is true, then /// intrinsic widths are computed as a separate pass instead of during flow construction. You /// may wish to turn this flag on in order to benchmark style recalculation against other /// browser engines. pub bubble_inline_sizes_separately: bool, /// True if we should show borders on all layers and tiles for /// debugging purposes (`--show-debug-borders`). pub show_debug_borders: bool, /// True if we should show borders on all fragments for debugging purposes (`--show-debug-fragment-borders`). pub show_debug_fragment_borders: bool, /// If set with --disable-text-aa, disable antialiasing on fonts. This is primarily useful for reftests /// where pixel perfect results are required when using fonts such as the Ahem /// font for layout tests. pub enable_text_antialiasing: bool, /// True if each step of layout is traced to an external JSON file /// for debugging purposes. Settings this implies sequential layout /// and paint. pub trace_layout: bool, /// If true, instrument the runtime for each task created and dump /// that information to a JSON file that can be viewed in the task /// profile viewer. pub profile_tasks: bool, /// `None` to disable devtools or `Some` with a port number to start a server to listen to /// remote Firefox devtools connections. pub devtools_port: Option<u16>, /// The initial requested size of the window. pub initial_window_size: TypedSize2D<ScreenPx, uint>, /// An optional string allowing the user agent to be set for testing. pub user_agent: Option<String>, /// Dumps the flow tree after a layout. pub dump_flow_tree: bool, /// Whether to show an error when display list geometry escapes flow overflow regions. pub validate_display_list_geometry: bool, /// A specific path to find required resources (such as user-agent.css). pub resources_path: Option<String>, } fn print_usage(app: &str, opts: &[getopts::OptGroup]) { let message = format!("Usage: {} [ options... 
] [URL]\n\twhere options include", app); println!("{}", getopts::usage(message.as_slice(), opts)); } pub fn print_debug_usage(app: &str) { fn print_option(name: &str, description: &str) { println!("\t{:<35} {}", name, description); } println!("Usage: {} debug option,[options,...]\n\twhere options include\n\nOptions:", app); print_option("bubble-widths", "Bubble intrinsic widths separately like other engines."); print_option("disable-text-aa", "Disable antialiasing of rendered text."); print_option("dump-flow-tree", "Print the flow tree after each layout."); print_option("profile-tasks", "Instrument each task, writing the output to a file."); print_option("show-compositor-borders", "Paint borders along layer and tile boundaries."); print_option("show-fragment-borders", "Paint borders along fragment boundaries."); print_option("trace-layout", "Write layout trace to an external file for debugging."); print_option("validate-display-list-geometry", "Display an error when display list geometry escapes overflow region."); println!(""); } fn args_fail(msg: &str) { io::stderr().write_line(msg).unwrap(); os::set_exit_status(1); } // Always use CPU painting on android. #[cfg(target_os="android")] static FORCE_CPU_PAINTING: bool = true; #[cfg(not(target_os="android"))] static FORCE_CPU_PAINTING: bool = false; pub fn default_opts() -> Opts { Opts { urls: vec!(), n_paint_threads: 1, gpu_painting: false, tile_size: 512, device_pixels_per_px: None, time_profiler_period: None, memory_profiler_period: None, enable_experimental: false, layout_threads: 1, nonincremental_layout: false, output_file: None, headless: true, hard_fail: true, bubble_inline_sizes_separately: false, show_debug_borders: false, show_debug_fragment_borders: false, enable_text_antialiasing: false, trace_layout: false, devtools_port: None, initial_window_size: TypedSize2D(800, 600), user_agent: None, dump_flow_tree: false, validate_display_list_geometry: false, profile_tasks: false, resources_path: None, } } pub fn from_cmdline_args(args: &[String]) -> bool { let app_name = args[0].to_string(); let args = args.tail(); let opts = vec!( getopts::optflag("c", "cpu", "CPU painting (default)"), getopts::optflag("g", "gpu", "GPU painting"), getopts::optopt("o", "output", "Output file", "output.png"), getopts::optopt("s", "size", "Size of tiles", "512"), getopts::optopt("", "device-pixel-ratio", "Device pixels per px", ""), getopts::optflag("e", "experimental", "Enable experimental web features"), getopts::optopt("t", "threads", "Number of paint threads", "1"), getopts::optflagopt("p", "profile", "Profiler flag and output interval", "10"), getopts::optflagopt("m", "memory-profile", "Memory profiler flag and output interval", "10"), getopts::optflag("x", "exit", "Exit after load flag"), getopts::optopt("y", "layout-threads", "Number of threads to use for layout", "1"), getopts::optflag("i", "nonincremental-layout", "Enable to turn off incremental layout."), getopts::optflag("z", "headless", "Headless mode"), getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"), getopts::optflagopt("", "devtools", "Start remote devtools server on port", "6000"), getopts::optopt("", "resolution", "Set window resolution.", "800x600"), getopts::optopt("u", "user-agent", "Set custom user agent string", "NCSA Mosaic/1.0 (X11;SunOS 4.1.4 sun4m)"), getopts::optopt("Z", "debug", "A comma-separated string of debug options. 
Pass help to show available options.", ""), getopts::optflag("h", "help", "Print this message"), getopts::optopt("r", "render-api", "Set the rendering API to use", "gl|mesa"), getopts::optopt("", "resources-path", "Path to find static resources", "/home/servo/resources"), ); let opt_match = match getopts::getopts(args, opts.as_slice()) { Ok(m) => m, Err(f) => { args_fail(format!("{}", f).as_slice()); return false; } }; if opt_match.opt_present("h") || opt_match.opt_present("help") { print_usage(app_name.as_slice(), opts.as_slice()); return false; }; let debug_string = match opt_match.opt_str("Z") { Some(string) => string, None => String::new() }; let mut debug_options = HashSet::new(); for split in debug_string.as_slice().split(',') { debug_options.insert(split.clone()); } if debug_options.contains(&"help") { print_debug_usage(app_name.as_slice()); return false; } let urls = if opt_match.free.is_empty() { print_usage(app_name.as_slice(), opts.as_slice()); args_fail("servo asks that you provide 1 or more URLs"); return false; } else { opt_match.free.clone() }; let tile_size: uint = match opt_match.opt_str("s") { Some(tile_size_str) => tile_size_str.parse().unwrap(), None => 512, }; let device_pixels_per_px = opt_match.opt_str("device-pixel-ratio").map(|dppx_str| ScaleFactor(dppx_str.parse().unwrap()) ); let mut n_paint_threads: uint = match opt_match.opt_str("t") { Some(n_paint_threads_str) => n_paint_threads_str.parse().unwrap(), None => 1, // FIXME: Number of cores. }; // If only the flag is present, default to a 5 second period for both profilers. let time_profiler_period = opt_match.opt_default("p", "5").map(|period| { period.parse().unwrap() }); let memory_profiler_period = opt_match.opt_default("m", "5").map(|period| { period.parse().unwrap() }); let gpu_painting =!FORCE_CPU_PAINTING && opt_match.opt_present("g"); let mut layout_threads: uint = match opt_match.opt_str("y") { Some(layout_threads_str) => layout_threads_str.parse().unwrap(), None => cmp::max(rt::default_sched_threads() * 3 / 4, 1), }; let nonincremental_layout = opt_match.opt_present("i"); let mut bubble_inline_sizes_separately = debug_options.contains(&"bubble-widths"); let trace_layout = debug_options.contains(&"trace-layout"); if trace_layout { n_paint_threads = 1; layout_threads = 1; bubble_inline_sizes_separately = true; } let devtools_port = opt_match.opt_default("devtools", "6000").map(|port| { port.parse().unwrap() }); let initial_window_size = match opt_match.opt_str("resolution") { Some(res_string) => { let res: Vec<uint> = res_string.split('x').map(|r| r.parse().unwrap()).collect(); TypedSize2D(res[0], res[1]) } None => { TypedSize2D(800, 600) } }; let opts = Opts { urls: urls, n_paint_threads: n_paint_threads, gpu_painting: gpu_painting, tile_size: tile_size, device_pixels_per_px: device_pixels_per_px, time_profiler_period: time_profiler_period, memory_profiler_period: memory_profiler_period, enable_experimental: opt_match.opt_present("e"), layout_threads: layout_threads, nonincremental_layout: nonincremental_layout, output_file: opt_match.opt_str("o"), headless: opt_match.opt_present("z"), hard_fail: opt_match.opt_present("f"), bubble_inline_sizes_separately: bubble_inline_sizes_separately, profile_tasks: debug_options.contains(&"profile-tasks"), trace_layout: trace_layout, devtools_port: devtools_port, initial_window_size: initial_window_size, user_agent: opt_match.opt_str("u"), show_debug_borders: debug_options.contains(&"show-compositor-borders"), show_debug_fragment_borders: 
debug_options.contains(&"show-fragment-borders"), enable_text_antialiasing:!debug_options.contains(&"disable-text-aa"), dump_flow_tree: debug_options.contains(&"dump-flow-tree"), validate_display_list_geometry: debug_options.contains(&"validate-display-list-geometry"), resources_path: opt_match.opt_str("resources-path"), }; set_opts(opts); true } static mut EXPERIMENTAL_ENABLED: bool = false; pub fn set_experimental_enabled(new_value: bool) { unsafe { EXPERIMENTAL_ENABLED = new_value; } } pub fn experimental_enabled() -> bool { unsafe { EXPERIMENTAL_ENABLED } } // Make Opts available globally. This saves having to clone and pass // opts everywhere it is used, which gets particularly cumbersome // when passing through the DOM structures. static mut OPTIONS: *mut Opts = 0 as *mut Opts; pub fn set_opts(opts: Opts) { unsafe { let box_opts = box opts; OPTIONS = mem::transmute(box_opts); } } #[inline] pub fn get<'a>() -> &'a Opts { unsafe { // If code attempts to retrieve the options and they haven't // been set by the platform init code, just return a default // set of options. This is mostly useful for unit tests that // run through a code path which queries the cmd line options. if OPTIONS == ptr::null_mut() { set_opts(default_opts()); } mem::transmute(OPTIONS) } }
Opts
identifier_name
opts.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Configuration options for a single run of the servo application. Created //! from command line arguments. use geometry::ScreenPx; use geom::scale_factor::ScaleFactor; use geom::size::TypedSize2D; use layers::geometry::DevicePixel; use getopts; use std::collections::HashSet; use std::cmp; use std::old_io as io; use std::mem; use std::os; use std::ptr; use std::rt; /// Global flags for Servo, currently set on the command line. #[derive(Clone)] pub struct Opts { /// The initial URLs to load. pub urls: Vec<String>, /// How many threads to use for CPU painting (`-t`). /// /// FIXME(pcwalton): This is not currently used. All painting is sequential. pub n_paint_threads: uint, /// True to use GPU painting via Skia-GL, false to use CPU painting via Skia (`-g`). Note that /// compositing is always done on the GPU. pub gpu_painting: bool, /// The maximum size of each tile in pixels (`-s`). pub tile_size: uint, /// The ratio of device pixels per px at the default scale. If unspecified, will use the /// platform default setting. pub device_pixels_per_px: Option<ScaleFactor<ScreenPx, DevicePixel, f32>>, /// `None` to disable the time profiler or `Some` with an interval in seconds to enable it and /// cause it to produce output on that interval (`-p`). pub time_profiler_period: Option<f64>, /// `None` to disable the memory profiler or `Some` with an interval in seconds to enable it /// and cause it to produce output on that interval (`-m`). pub memory_profiler_period: Option<f64>, /// Enable experimental web features (`-e`). pub enable_experimental: bool, /// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive /// sequential algorithm. pub layout_threads: uint, pub nonincremental_layout: bool, pub output_file: Option<String>, pub headless: bool, pub hard_fail: bool, /// True if we should bubble intrinsic widths sequentially (`-b`). If this is true, then /// intrinsic widths are computed as a separate pass instead of during flow construction. You /// may wish to turn this flag on in order to benchmark style recalculation against other /// browser engines. pub bubble_inline_sizes_separately: bool, /// True if we should show borders on all layers and tiles for /// debugging purposes (`--show-debug-borders`). pub show_debug_borders: bool, /// True if we should show borders on all fragments for debugging purposes (`--show-debug-fragment-borders`). pub show_debug_fragment_borders: bool, /// If set with --disable-text-aa, disable antialiasing on fonts. This is primarily useful for reftests /// where pixel perfect results are required when using fonts such as the Ahem /// font for layout tests. pub enable_text_antialiasing: bool, /// True if each step of layout is traced to an external JSON file /// for debugging purposes. Settings this implies sequential layout /// and paint. pub trace_layout: bool, /// If true, instrument the runtime for each task created and dump /// that information to a JSON file that can be viewed in the task /// profile viewer. pub profile_tasks: bool, /// `None` to disable devtools or `Some` with a port number to start a server to listen to /// remote Firefox devtools connections. pub devtools_port: Option<u16>, /// The initial requested size of the window. 
pub initial_window_size: TypedSize2D<ScreenPx, uint>, /// An optional string allowing the user agent to be set for testing. pub user_agent: Option<String>, /// Dumps the flow tree after a layout. pub dump_flow_tree: bool, /// Whether to show an error when display list geometry escapes flow overflow regions. pub validate_display_list_geometry: bool, /// A specific path to find required resources (such as user-agent.css). pub resources_path: Option<String>, } fn print_usage(app: &str, opts: &[getopts::OptGroup]) { let message = format!("Usage: {} [ options... ] [URL]\n\twhere options include", app); println!("{}", getopts::usage(message.as_slice(), opts)); } pub fn print_debug_usage(app: &str) { fn print_option(name: &str, description: &str) { println!("\t{:<35} {}", name, description); } println!("Usage: {} debug option,[options,...]\n\twhere options include\n\nOptions:", app); print_option("bubble-widths", "Bubble intrinsic widths separately like other engines."); print_option("disable-text-aa", "Disable antialiasing of rendered text."); print_option("dump-flow-tree", "Print the flow tree after each layout."); print_option("profile-tasks", "Instrument each task, writing the output to a file."); print_option("show-compositor-borders", "Paint borders along layer and tile boundaries."); print_option("show-fragment-borders", "Paint borders along fragment boundaries."); print_option("trace-layout", "Write layout trace to an external file for debugging."); print_option("validate-display-list-geometry", "Display an error when display list geometry escapes overflow region."); println!(""); } fn args_fail(msg: &str) { io::stderr().write_line(msg).unwrap(); os::set_exit_status(1); } // Always use CPU painting on android. #[cfg(target_os="android")] static FORCE_CPU_PAINTING: bool = true; #[cfg(not(target_os="android"))] static FORCE_CPU_PAINTING: bool = false; pub fn default_opts() -> Opts { Opts { urls: vec!(), n_paint_threads: 1, gpu_painting: false, tile_size: 512, device_pixels_per_px: None, time_profiler_period: None, memory_profiler_period: None, enable_experimental: false, layout_threads: 1, nonincremental_layout: false, output_file: None, headless: true, hard_fail: true, bubble_inline_sizes_separately: false, show_debug_borders: false, show_debug_fragment_borders: false, enable_text_antialiasing: false, trace_layout: false, devtools_port: None, initial_window_size: TypedSize2D(800, 600), user_agent: None, dump_flow_tree: false, validate_display_list_geometry: false, profile_tasks: false, resources_path: None, } } pub fn from_cmdline_args(args: &[String]) -> bool { let app_name = args[0].to_string(); let args = args.tail(); let opts = vec!( getopts::optflag("c", "cpu", "CPU painting (default)"), getopts::optflag("g", "gpu", "GPU painting"), getopts::optopt("o", "output", "Output file", "output.png"), getopts::optopt("s", "size", "Size of tiles", "512"), getopts::optopt("", "device-pixel-ratio", "Device pixels per px", ""), getopts::optflag("e", "experimental", "Enable experimental web features"), getopts::optopt("t", "threads", "Number of paint threads", "1"), getopts::optflagopt("p", "profile", "Profiler flag and output interval", "10"), getopts::optflagopt("m", "memory-profile", "Memory profiler flag and output interval", "10"), getopts::optflag("x", "exit", "Exit after load flag"), getopts::optopt("y", "layout-threads", "Number of threads to use for layout", "1"), getopts::optflag("i", "nonincremental-layout", "Enable to turn off incremental layout."), getopts::optflag("z", "headless", 
"Headless mode"), getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"), getopts::optflagopt("", "devtools", "Start remote devtools server on port", "6000"), getopts::optopt("", "resolution", "Set window resolution.", "800x600"), getopts::optopt("u", "user-agent", "Set custom user agent string", "NCSA Mosaic/1.0 (X11;SunOS 4.1.4 sun4m)"), getopts::optopt("Z", "debug", "A comma-separated string of debug options. Pass help to show available options.", ""), getopts::optflag("h", "help", "Print this message"), getopts::optopt("r", "render-api", "Set the rendering API to use", "gl|mesa"), getopts::optopt("", "resources-path", "Path to find static resources", "/home/servo/resources"), ); let opt_match = match getopts::getopts(args, opts.as_slice()) { Ok(m) => m, Err(f) => { args_fail(format!("{}", f).as_slice()); return false; } }; if opt_match.opt_present("h") || opt_match.opt_present("help") { print_usage(app_name.as_slice(), opts.as_slice()); return false; }; let debug_string = match opt_match.opt_str("Z") { Some(string) => string, None => String::new() }; let mut debug_options = HashSet::new(); for split in debug_string.as_slice().split(',') { debug_options.insert(split.clone()); } if debug_options.contains(&"help") { print_debug_usage(app_name.as_slice()); return false; } let urls = if opt_match.free.is_empty() { print_usage(app_name.as_slice(), opts.as_slice()); args_fail("servo asks that you provide 1 or more URLs"); return false; } else { opt_match.free.clone() }; let tile_size: uint = match opt_match.opt_str("s") { Some(tile_size_str) => tile_size_str.parse().unwrap(), None => 512, }; let device_pixels_per_px = opt_match.opt_str("device-pixel-ratio").map(|dppx_str| ScaleFactor(dppx_str.parse().unwrap()) ); let mut n_paint_threads: uint = match opt_match.opt_str("t") { Some(n_paint_threads_str) => n_paint_threads_str.parse().unwrap(), None => 1, // FIXME: Number of cores. }; // If only the flag is present, default to a 5 second period for both profilers. 
let time_profiler_period = opt_match.opt_default("p", "5").map(|period| { period.parse().unwrap() }); let memory_profiler_period = opt_match.opt_default("m", "5").map(|period| { period.parse().unwrap() }); let gpu_painting =!FORCE_CPU_PAINTING && opt_match.opt_present("g"); let mut layout_threads: uint = match opt_match.opt_str("y") { Some(layout_threads_str) => layout_threads_str.parse().unwrap(), None => cmp::max(rt::default_sched_threads() * 3 / 4, 1), }; let nonincremental_layout = opt_match.opt_present("i"); let mut bubble_inline_sizes_separately = debug_options.contains(&"bubble-widths"); let trace_layout = debug_options.contains(&"trace-layout"); if trace_layout { n_paint_threads = 1; layout_threads = 1; bubble_inline_sizes_separately = true; } let devtools_port = opt_match.opt_default("devtools", "6000").map(|port| { port.parse().unwrap() }); let initial_window_size = match opt_match.opt_str("resolution") { Some(res_string) => { let res: Vec<uint> = res_string.split('x').map(|r| r.parse().unwrap()).collect(); TypedSize2D(res[0], res[1]) } None => { TypedSize2D(800, 600) } }; let opts = Opts { urls: urls, n_paint_threads: n_paint_threads, gpu_painting: gpu_painting, tile_size: tile_size, device_pixels_per_px: device_pixels_per_px, time_profiler_period: time_profiler_period, memory_profiler_period: memory_profiler_period, enable_experimental: opt_match.opt_present("e"), layout_threads: layout_threads, nonincremental_layout: nonincremental_layout, output_file: opt_match.opt_str("o"), headless: opt_match.opt_present("z"), hard_fail: opt_match.opt_present("f"), bubble_inline_sizes_separately: bubble_inline_sizes_separately, profile_tasks: debug_options.contains(&"profile-tasks"), trace_layout: trace_layout, devtools_port: devtools_port, initial_window_size: initial_window_size, user_agent: opt_match.opt_str("u"), show_debug_borders: debug_options.contains(&"show-compositor-borders"), show_debug_fragment_borders: debug_options.contains(&"show-fragment-borders"), enable_text_antialiasing:!debug_options.contains(&"disable-text-aa"), dump_flow_tree: debug_options.contains(&"dump-flow-tree"), validate_display_list_geometry: debug_options.contains(&"validate-display-list-geometry"), resources_path: opt_match.opt_str("resources-path"), }; set_opts(opts); true } static mut EXPERIMENTAL_ENABLED: bool = false; pub fn set_experimental_enabled(new_value: bool) { unsafe { EXPERIMENTAL_ENABLED = new_value; } } pub fn experimental_enabled() -> bool { unsafe { EXPERIMENTAL_ENABLED } } // Make Opts available globally. This saves having to clone and pass // opts everywhere it is used, which gets particularly cumbersome // when passing through the DOM structures. static mut OPTIONS: *mut Opts = 0 as *mut Opts; pub fn set_opts(opts: Opts) { unsafe { let box_opts = box opts; OPTIONS = mem::transmute(box_opts); } } #[inline] pub fn get<'a>() -> &'a Opts
{ unsafe { // If code attempts to retrieve the options and they haven't // been set by the platform init code, just return a default // set of options. This is mostly useful for unit tests that // run through a code path which queries the cmd line options. if OPTIONS == ptr::null_mut() { set_opts(default_opts()); } mem::transmute(OPTIONS) } }
identifier_body
opts.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Configuration options for a single run of the servo application. Created //! from command line arguments. use geometry::ScreenPx; use geom::scale_factor::ScaleFactor; use geom::size::TypedSize2D; use layers::geometry::DevicePixel; use getopts; use std::collections::HashSet; use std::cmp; use std::old_io as io; use std::mem; use std::os; use std::ptr; use std::rt; /// Global flags for Servo, currently set on the command line. #[derive(Clone)] pub struct Opts { /// The initial URLs to load. pub urls: Vec<String>, /// How many threads to use for CPU painting (`-t`). /// /// FIXME(pcwalton): This is not currently used. All painting is sequential. pub n_paint_threads: uint, /// True to use GPU painting via Skia-GL, false to use CPU painting via Skia (`-g`). Note that /// compositing is always done on the GPU. pub gpu_painting: bool, /// The maximum size of each tile in pixels (`-s`). pub tile_size: uint, /// The ratio of device pixels per px at the default scale. If unspecified, will use the /// platform default setting. pub device_pixels_per_px: Option<ScaleFactor<ScreenPx, DevicePixel, f32>>, /// `None` to disable the time profiler or `Some` with an interval in seconds to enable it and /// cause it to produce output on that interval (`-p`). pub time_profiler_period: Option<f64>, /// `None` to disable the memory profiler or `Some` with an interval in seconds to enable it /// and cause it to produce output on that interval (`-m`). pub memory_profiler_period: Option<f64>, /// Enable experimental web features (`-e`). pub enable_experimental: bool, /// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive /// sequential algorithm. pub layout_threads: uint, pub nonincremental_layout: bool, pub output_file: Option<String>, pub headless: bool, pub hard_fail: bool, /// True if we should bubble intrinsic widths sequentially (`-b`). If this is true, then /// intrinsic widths are computed as a separate pass instead of during flow construction. You /// may wish to turn this flag on in order to benchmark style recalculation against other /// browser engines. pub bubble_inline_sizes_separately: bool, /// True if we should show borders on all layers and tiles for /// debugging purposes (`--show-debug-borders`). pub show_debug_borders: bool, /// True if we should show borders on all fragments for debugging purposes (`--show-debug-fragment-borders`). pub show_debug_fragment_borders: bool, /// If set with --disable-text-aa, disable antialiasing on fonts. This is primarily useful for reftests /// where pixel perfect results are required when using fonts such as the Ahem /// font for layout tests. pub enable_text_antialiasing: bool, /// True if each step of layout is traced to an external JSON file /// for debugging purposes. Settings this implies sequential layout /// and paint. pub trace_layout: bool, /// If true, instrument the runtime for each task created and dump /// that information to a JSON file that can be viewed in the task /// profile viewer. pub profile_tasks: bool, /// `None` to disable devtools or `Some` with a port number to start a server to listen to /// remote Firefox devtools connections. pub devtools_port: Option<u16>, /// The initial requested size of the window. 
pub initial_window_size: TypedSize2D<ScreenPx, uint>, /// An optional string allowing the user agent to be set for testing. pub user_agent: Option<String>, /// Dumps the flow tree after a layout. pub dump_flow_tree: bool, /// Whether to show an error when display list geometry escapes flow overflow regions. pub validate_display_list_geometry: bool, /// A specific path to find required resources (such as user-agent.css). pub resources_path: Option<String>, } fn print_usage(app: &str, opts: &[getopts::OptGroup]) { let message = format!("Usage: {} [ options... ] [URL]\n\twhere options include", app); println!("{}", getopts::usage(message.as_slice(), opts)); } pub fn print_debug_usage(app: &str) { fn print_option(name: &str, description: &str) { println!("\t{:<35} {}", name, description); } println!("Usage: {} debug option,[options,...]\n\twhere options include\n\nOptions:", app); print_option("bubble-widths", "Bubble intrinsic widths separately like other engines."); print_option("disable-text-aa", "Disable antialiasing of rendered text."); print_option("dump-flow-tree", "Print the flow tree after each layout."); print_option("profile-tasks", "Instrument each task, writing the output to a file."); print_option("show-compositor-borders", "Paint borders along layer and tile boundaries."); print_option("show-fragment-borders", "Paint borders along fragment boundaries."); print_option("trace-layout", "Write layout trace to an external file for debugging."); print_option("validate-display-list-geometry", "Display an error when display list geometry escapes overflow region."); println!(""); } fn args_fail(msg: &str) { io::stderr().write_line(msg).unwrap(); os::set_exit_status(1); } // Always use CPU painting on android. #[cfg(target_os="android")] static FORCE_CPU_PAINTING: bool = true; #[cfg(not(target_os="android"))] static FORCE_CPU_PAINTING: bool = false; pub fn default_opts() -> Opts { Opts { urls: vec!(), n_paint_threads: 1, gpu_painting: false, tile_size: 512, device_pixels_per_px: None, time_profiler_period: None, memory_profiler_period: None, enable_experimental: false, layout_threads: 1, nonincremental_layout: false, output_file: None, headless: true, hard_fail: true, bubble_inline_sizes_separately: false, show_debug_borders: false, show_debug_fragment_borders: false, enable_text_antialiasing: false, trace_layout: false, devtools_port: None, initial_window_size: TypedSize2D(800, 600), user_agent: None, dump_flow_tree: false, validate_display_list_geometry: false, profile_tasks: false, resources_path: None, } } pub fn from_cmdline_args(args: &[String]) -> bool { let app_name = args[0].to_string(); let args = args.tail(); let opts = vec!( getopts::optflag("c", "cpu", "CPU painting (default)"), getopts::optflag("g", "gpu", "GPU painting"), getopts::optopt("o", "output", "Output file", "output.png"), getopts::optopt("s", "size", "Size of tiles", "512"), getopts::optopt("", "device-pixel-ratio", "Device pixels per px", ""), getopts::optflag("e", "experimental", "Enable experimental web features"), getopts::optopt("t", "threads", "Number of paint threads", "1"), getopts::optflagopt("p", "profile", "Profiler flag and output interval", "10"), getopts::optflagopt("m", "memory-profile", "Memory profiler flag and output interval", "10"), getopts::optflag("x", "exit", "Exit after load flag"), getopts::optopt("y", "layout-threads", "Number of threads to use for layout", "1"), getopts::optflag("i", "nonincremental-layout", "Enable to turn off incremental layout."), getopts::optflag("z", "headless", 
"Headless mode"), getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"), getopts::optflagopt("", "devtools", "Start remote devtools server on port", "6000"), getopts::optopt("", "resolution", "Set window resolution.", "800x600"), getopts::optopt("u", "user-agent", "Set custom user agent string", "NCSA Mosaic/1.0 (X11;SunOS 4.1.4 sun4m)"), getopts::optopt("Z", "debug", "A comma-separated string of debug options. Pass help to show available options.", ""), getopts::optflag("h", "help", "Print this message"), getopts::optopt("r", "render-api", "Set the rendering API to use", "gl|mesa"), getopts::optopt("", "resources-path", "Path to find static resources", "/home/servo/resources"), ); let opt_match = match getopts::getopts(args, opts.as_slice()) { Ok(m) => m, Err(f) => { args_fail(format!("{}", f).as_slice()); return false; } }; if opt_match.opt_present("h") || opt_match.opt_present("help") { print_usage(app_name.as_slice(), opts.as_slice()); return false; }; let debug_string = match opt_match.opt_str("Z") { Some(string) => string, None => String::new() }; let mut debug_options = HashSet::new(); for split in debug_string.as_slice().split(',') { debug_options.insert(split.clone()); } if debug_options.contains(&"help") { print_debug_usage(app_name.as_slice()); return false; } let urls = if opt_match.free.is_empty() { print_usage(app_name.as_slice(), opts.as_slice()); args_fail("servo asks that you provide 1 or more URLs"); return false; } else { opt_match.free.clone() }; let tile_size: uint = match opt_match.opt_str("s") { Some(tile_size_str) => tile_size_str.parse().unwrap(), None => 512, }; let device_pixels_per_px = opt_match.opt_str("device-pixel-ratio").map(|dppx_str| ScaleFactor(dppx_str.parse().unwrap()) ); let mut n_paint_threads: uint = match opt_match.opt_str("t") { Some(n_paint_threads_str) => n_paint_threads_str.parse().unwrap(), None => 1, // FIXME: Number of cores. }; // If only the flag is present, default to a 5 second period for both profilers. 
let time_profiler_period = opt_match.opt_default("p", "5").map(|period| { period.parse().unwrap() }); let memory_profiler_period = opt_match.opt_default("m", "5").map(|period| { period.parse().unwrap() }); let gpu_painting =!FORCE_CPU_PAINTING && opt_match.opt_present("g"); let mut layout_threads: uint = match opt_match.opt_str("y") { Some(layout_threads_str) => layout_threads_str.parse().unwrap(), None => cmp::max(rt::default_sched_threads() * 3 / 4, 1), }; let nonincremental_layout = opt_match.opt_present("i"); let mut bubble_inline_sizes_separately = debug_options.contains(&"bubble-widths"); let trace_layout = debug_options.contains(&"trace-layout"); if trace_layout { n_paint_threads = 1; layout_threads = 1; bubble_inline_sizes_separately = true; } let devtools_port = opt_match.opt_default("devtools", "6000").map(|port| { port.parse().unwrap() }); let initial_window_size = match opt_match.opt_str("resolution") { Some(res_string) => { let res: Vec<uint> = res_string.split('x').map(|r| r.parse().unwrap()).collect(); TypedSize2D(res[0], res[1]) } None => { TypedSize2D(800, 600) } }; let opts = Opts { urls: urls, n_paint_threads: n_paint_threads, gpu_painting: gpu_painting, tile_size: tile_size, device_pixels_per_px: device_pixels_per_px, time_profiler_period: time_profiler_period, memory_profiler_period: memory_profiler_period, enable_experimental: opt_match.opt_present("e"), layout_threads: layout_threads, nonincremental_layout: nonincremental_layout, output_file: opt_match.opt_str("o"), headless: opt_match.opt_present("z"), hard_fail: opt_match.opt_present("f"), bubble_inline_sizes_separately: bubble_inline_sizes_separately, profile_tasks: debug_options.contains(&"profile-tasks"), trace_layout: trace_layout, devtools_port: devtools_port, initial_window_size: initial_window_size, user_agent: opt_match.opt_str("u"), show_debug_borders: debug_options.contains(&"show-compositor-borders"), show_debug_fragment_borders: debug_options.contains(&"show-fragment-borders"), enable_text_antialiasing:!debug_options.contains(&"disable-text-aa"),
set_opts(opts); true } static mut EXPERIMENTAL_ENABLED: bool = false; pub fn set_experimental_enabled(new_value: bool) { unsafe { EXPERIMENTAL_ENABLED = new_value; } } pub fn experimental_enabled() -> bool { unsafe { EXPERIMENTAL_ENABLED } } // Make Opts available globally. This saves having to clone and pass // opts everywhere it is used, which gets particularly cumbersome // when passing through the DOM structures. static mut OPTIONS: *mut Opts = 0 as *mut Opts; pub fn set_opts(opts: Opts) { unsafe { let box_opts = box opts; OPTIONS = mem::transmute(box_opts); } } #[inline] pub fn get<'a>() -> &'a Opts { unsafe { // If code attempts to retrieve the options and they haven't // been set by the platform init code, just return a default // set of options. This is mostly useful for unit tests that // run through a code path which queries the cmd line options. if OPTIONS == ptr::null_mut() { set_opts(default_opts()); } mem::transmute(OPTIONS) } }
dump_flow_tree: debug_options.contains(&"dump-flow-tree"), validate_display_list_geometry: debug_options.contains(&"validate-display-list-geometry"), resources_path: opt_match.opt_str("resources-path"), };
random_line_split
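`set_opts` and `get` above share a single `Opts` value process-wide through a raw `static mut` pointer and `transmute`, with `get` falling back to `default_opts()` when nothing was set. Below is a hedged sketch of the same set-once/read-anywhere pattern using `std::sync::OnceLock` instead of `unsafe`; the two fields shown are placeholders, not the full Servo options struct.

use std::sync::OnceLock;

#[derive(Clone, Debug, Default)]
struct Opts {
    headless: bool,
    tile_size: usize,
}

static OPTIONS: OnceLock<Opts> = OnceLock::new();

fn set_opts(opts: Opts) {
    // Ignore the error if options were already set, mirroring "set once".
    let _ = OPTIONS.set(opts);
}

fn get() -> &'static Opts {
    // Fall back to defaults if nothing was set, like the original get().
    OPTIONS.get_or_init(Opts::default)
}

fn main() {
    set_opts(Opts { headless: true, tile_size: 512 });
    assert!(get().headless);
    assert_eq!(get().tile_size, 512);
}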
readme_sync.rs
// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>. // This file may not be copied, modified, or distributed except according to those terms. // required by `FutureClient` (not used directly in this example) #![feature(plugin)] #![plugin(tarpc_plugins)] extern crate futures; #[macro_use] extern crate tarpc; extern crate tokio_core; use std::sync::mpsc; use std::thread; use tarpc::sync::{client, server}; use tarpc::sync::client::ClientExt; use tarpc::util::Never; service! { rpc hello(name: String) -> String; } #[derive(Clone)] struct HelloServer; impl SyncService for HelloServer { fn hello(&self, name: String) -> Result<String, Never> { Ok(format!("Hello from thread {}, {}!", thread::current().name().unwrap(), name)) }
let (tx, rx) = mpsc::channel(); thread::spawn(move || { let handle = HelloServer.listen("localhost:0", server::Options::default()).unwrap(); tx.send(handle.addr()).unwrap(); handle.run(); }); let client = SyncClient::connect(rx.recv().unwrap(), client::Options::default()).unwrap(); println!("{}", client.hello("Mom".to_string()).unwrap()); }
} fn main() {
random_line_split
readme_sync.rs
// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>. // This file may not be copied, modified, or distributed except according to those terms. // required by `FutureClient` (not used directly in this example) #![feature(plugin)] #![plugin(tarpc_plugins)] extern crate futures; #[macro_use] extern crate tarpc; extern crate tokio_core; use std::sync::mpsc; use std::thread; use tarpc::sync::{client, server}; use tarpc::sync::client::ClientExt; use tarpc::util::Never; service! { rpc hello(name: String) -> String; } #[derive(Clone)] struct
; impl SyncService for HelloServer { fn hello(&self, name: String) -> Result<String, Never> { Ok(format!("Hello from thread {}, {}!", thread::current().name().unwrap(), name)) } } fn main() { let (tx, rx) = mpsc::channel(); thread::spawn(move || { let handle = HelloServer.listen("localhost:0", server::Options::default()).unwrap(); tx.send(handle.addr()).unwrap(); handle.run(); }); let client = SyncClient::connect(rx.recv().unwrap(), client::Options::default()).unwrap(); println!("{}", client.hello("Mom".to_string()).unwrap()); }
HelloServer
identifier_name
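The readme example above starts the server on a background thread, hands the bound address back to the main thread over an mpsc channel, and only then connects the client. Here is a std-only sketch of that same handshake using a plain TCP socket rather than tarpc, so none of the tarpc-generated client/server types appear; the greeting format is only an approximation of the example's.

use std::io::{Read, Write};
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
use std::thread;

fn main() {
    let (tx, rx) = mpsc::channel();
    // Server thread: bind to an ephemeral port and report the address back.
    let server = thread::spawn(move || {
        let listener = TcpListener::bind("127.0.0.1:0").unwrap();
        tx.send(listener.local_addr().unwrap()).unwrap();
        let (mut socket, _) = listener.accept().unwrap();
        let mut name = String::new();
        socket.read_to_string(&mut name).unwrap();
        format!("Hello from thread {:?}, {}!", thread::current().id(), name)
    });

    // Client side: wait for the address, connect, send a "request".
    let addr = rx.recv().unwrap();
    let mut client = TcpStream::connect(addr).unwrap();
    client.write_all(b"Mom").unwrap();
    drop(client); // close the stream so the server sees EOF

    println!("{}", server.join().unwrap());
}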
router.rs
use regex::Regex; use hyper::Method; use hyper::server::Request; use context::{Context, Params, ParamType}; type Handler = fn(Context) -> ::Response; pub struct Router { pub routes: Vec<Route> } impl Router { pub fn new() -> Router { Router { routes: Vec::new() } } pub fn get(&mut self, path: &str, handler: Handler) { self.routes.push( Route::new(Method::Get, path, handler) ); } } #[derive(Clone)] pub struct Route { pub method: Method, pub path: RoutePath, pub handler: Handler } impl Route { pub fn new(method: Method, path: &str, handler: Handler) -> Route { Route { method: method, path: RoutePath::new(path), handler: handler } } pub fn matches_request(&self, request: &Request) -> Option<Params> { if self.method!= *request.method() { return None; } return self.path.matches_path(request.path()); } } #[derive(Clone, PartialEq, Debug)] pub enum PathToken { Str(String), Var { key: String, datatype: String } } #[derive(Clone, Debug, PartialEq)] pub struct RoutePath { pub tokenized_path: Vec<PathToken> } impl RoutePath { pub fn new(path: &str) -> Self { let vec_path = RoutePath::tokenize_path(path); RoutePath { tokenized_path: vec_path } } fn tokenize_path(path: &str) -> Vec<PathToken> { let path = &path[1..]; // Remove root let re = Regex::new(r"^\{([a-zA-Z_]+)\}$").unwrap(); let path_vec = path.split("/") .map(|t| { if re.is_match(t) { // Capture the variable name between {} let cap = re.captures(t).unwrap(); // There should be only one, grab it as str let key = cap.get(1).unwrap().as_str(); return PathToken::Var { key: String::from(key), datatype: String::from("string") } } PathToken::Str(String::from(t)) }) .collect::<Vec<PathToken>>(); path_vec } pub fn matches_path(&self, request_path: &str) -> Option<Params> { // Remove /, split on /, into vec of Strings let incoming_path = &request_path[1..].split("/").map(|i| { String::from(i) }).collect::<Vec<String>>(); // Both RoutePath and Request should have equal length tokenized paths if self.tokenized_path.len()!= incoming_path.len()
// Save url params while processing let mut params = Params::new(); for (index, token) in self.tokenized_path.iter().enumerate() { match token { &PathToken::Str(ref s) => { if *s!= incoming_path[index] { return None } }, &PathToken::Var {ref key, ref datatype} => { if datatype == "string" { params.insert( key.to_string(), ParamType::Str(incoming_path[index].to_string()) ); } } } } Some(params) } } #[cfg(test)] mod tests { use std::str::FromStr; use hyper::{Request, Response, Method, Uri}; use super::{Route, RoutePath, PathToken, Context}; fn generic_handler(_context: Context) -> Response { return Response::new(); } #[test] fn routepath_for_root() { let routepath = RoutePath::new("/"); assert_eq!(routepath.tokenized_path, vec![PathToken::Str(String::from(""))]); } #[test] fn routepath_for_user_profile() { let routepath = RoutePath::new("/user/profile"); assert_eq!(routepath.tokenized_path, vec![ PathToken::Str(String::from("user")), PathToken::Str(String::from("profile")) ] ) } #[test] fn route_should_be_matched() { let route = Route::new(Method::Get, "/monkeys", generic_handler); let path = Uri::from_str("http://example.com/monkeys").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.matches_request(&request).is_some()); } #[test] fn route_should_not_be_matched() { let route = Route::new(Method::Get, "/monkeys", generic_handler); let path = Uri::from_str("http://example.com/nomatch").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.matches_request(&request).is_none()); } #[test] fn route_should_match_with_variables() { let route = Route::new(Method::Get, "/user/{username}", generic_handler); let path = Uri::from_str("http://example.com/user/johndoe").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.path.matches_path(request.path()).is_some()); assert!(route.matches_request(&request).is_some()); } }
{ return None; }
conditional_block
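The conditional_block filled in above is the segment-count guard inside RoutePath::matches_path: if the tokenized route and the incoming path do not have the same number of segments, matching stops immediately. Below is a minimal, dependency-free sketch of that same idea, assuming nothing beyond std; Token, tokenize, match_path and main are invented names for illustration and the real code uses regex and hyper's Params/ParamType instead.

use std::collections::HashMap;

// Simplified stand-in for RoutePath's tokenized_path: literal segments vs. {var} segments.
#[derive(Debug, PartialEq)]
enum Token {
    Lit(String),
    Var(String),
}

fn tokenize(pattern: &str) -> Vec<Token> {
    pattern[1..] // drop the leading '/'
        .split('/')
        .map(|s| {
            if s.starts_with('{') && s.ends_with('}') {
                Token::Var(s[1..s.len() - 1].to_string())
            } else {
                Token::Lit(s.to_string())
            }
        })
        .collect()
}

fn match_path(tokens: &[Token], path: &str) -> Option<HashMap<String, String>> {
    let segments: Vec<&str> = path[1..].split('/').collect();
    // Mirrors the conditional block above: unequal segment counts can never match.
    if tokens.len() != segments.len() {
        return None;
    }
    let mut params = HashMap::new();
    for (token, segment) in tokens.iter().zip(&segments) {
        match token {
            Token::Lit(s) if s.as_str() == *segment => {}
            Token::Lit(_) => return None,
            Token::Var(key) => {
                params.insert(key.clone(), segment.to_string());
            }
        }
    }
    Some(params)
}

fn main() {
    let tokens = tokenize("/user/{username}");
    assert!(match_path(&tokens, "/user/johndoe").is_some());
    assert!(match_path(&tokens, "/user").is_none()); // length mismatch -> None
}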
router.rs
use regex::Regex; use hyper::Method; use hyper::server::Request; use context::{Context, Params, ParamType}; type Handler = fn(Context) -> ::Response; pub struct Router { pub routes: Vec<Route> } impl Router { pub fn new() -> Router { Router { routes: Vec::new() } }
} } #[derive(Clone)] pub struct Route { pub method: Method, pub path: RoutePath, pub handler: Handler } impl Route { pub fn new(method: Method, path: &str, handler: Handler) -> Route { Route { method: method, path: RoutePath::new(path), handler: handler } } pub fn matches_request(&self, request: &Request) -> Option<Params> { if self.method!= *request.method() { return None; } return self.path.matches_path(request.path()); } } #[derive(Clone, PartialEq, Debug)] pub enum PathToken { Str(String), Var { key: String, datatype: String } } #[derive(Clone, Debug, PartialEq)] pub struct RoutePath { pub tokenized_path: Vec<PathToken> } impl RoutePath { pub fn new(path: &str) -> Self { let vec_path = RoutePath::tokenize_path(path); RoutePath { tokenized_path: vec_path } } fn tokenize_path(path: &str) -> Vec<PathToken> { let path = &path[1..]; // Remove root let re = Regex::new(r"^\{([a-zA-Z_]+)\}$").unwrap(); let path_vec = path.split("/") .map(|t| { if re.is_match(t) { // Capture the variable name between {} let cap = re.captures(t).unwrap(); // There should be only one, grab it as str let key = cap.get(1).unwrap().as_str(); return PathToken::Var { key: String::from(key), datatype: String::from("string") } } PathToken::Str(String::from(t)) }) .collect::<Vec<PathToken>>(); path_vec } pub fn matches_path(&self, request_path: &str) -> Option<Params> { // Remove /, split on /, into vec of Strings let incoming_path = &request_path[1..].split("/").map(|i| { String::from(i) }).collect::<Vec<String>>(); // Both RoutePath and Request should have equal length tokenized paths if self.tokenized_path.len()!= incoming_path.len() { return None; } // Save url params while processing let mut params = Params::new(); for (index, token) in self.tokenized_path.iter().enumerate() { match token { &PathToken::Str(ref s) => { if *s!= incoming_path[index] { return None } }, &PathToken::Var {ref key, ref datatype} => { if datatype == "string" { params.insert( key.to_string(), ParamType::Str(incoming_path[index].to_string()) ); } } } } Some(params) } } #[cfg(test)] mod tests { use std::str::FromStr; use hyper::{Request, Response, Method, Uri}; use super::{Route, RoutePath, PathToken, Context}; fn generic_handler(_context: Context) -> Response { return Response::new(); } #[test] fn routepath_for_root() { let routepath = RoutePath::new("/"); assert_eq!(routepath.tokenized_path, vec![PathToken::Str(String::from(""))]); } #[test] fn routepath_for_user_profile() { let routepath = RoutePath::new("/user/profile"); assert_eq!(routepath.tokenized_path, vec![ PathToken::Str(String::from("user")), PathToken::Str(String::from("profile")) ] ) } #[test] fn route_should_be_matched() { let route = Route::new(Method::Get, "/monkeys", generic_handler); let path = Uri::from_str("http://example.com/monkeys").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.matches_request(&request).is_some()); } #[test] fn route_should_not_be_matched() { let route = Route::new(Method::Get, "/monkeys", generic_handler); let path = Uri::from_str("http://example.com/nomatch").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.matches_request(&request).is_none()); } #[test] fn route_should_match_with_variables() { let route = Route::new(Method::Get, "/user/{username}", generic_handler); let path = Uri::from_str("http://example.com/user/johndoe").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.path.matches_path(request.path()).is_some()); 
assert!(route.matches_request(&request).is_some()); } }
pub fn get(&mut self, path: &str, handler: Handler) { self.routes.push( Route::new(Method::Get, path, handler) );
random_line_split
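The random_line_split middle above restores Router::get, which simply pushes a Route onto a Vec that is later scanned for the first matching method and path. The following is a hedged, std-only sketch of that register-then-scan dispatch style; Method, Handler, Route, Router and dispatch are simplified stand-ins invented here, not the hyper-backed types from the row.

// Register routes with get(), then dispatch by scanning the Vec for the first match.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Method {
    Get,
    Post,
}

type Handler = fn(&str) -> String;

struct Route {
    method: Method,
    path: String,
    handler: Handler,
}

struct Router {
    routes: Vec<Route>,
}

impl Router {
    fn new() -> Router {
        Router { routes: Vec::new() }
    }

    // Mirrors the filled-in middle: a convenience wrapper that registers a GET route.
    fn get(&mut self, path: &str, handler: Handler) {
        self.routes.push(Route {
            method: Method::Get,
            path: path.to_string(),
            handler,
        });
    }

    // First route whose method and path both match wins.
    fn dispatch(&self, method: Method, path: &str) -> Option<String> {
        self.routes
            .iter()
            .find(|r| r.method == method && r.path == path)
            .map(|r| (r.handler)(path))
    }
}

fn hello(_path: &str) -> String {
    "hello".to_string()
}

fn main() {
    let mut router = Router::new();
    router.get("/monkeys", hello);
    assert_eq!(router.dispatch(Method::Get, "/monkeys"), Some("hello".to_string()));
    assert!(router.dispatch(Method::Post, "/monkeys").is_none());
}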
router.rs
use regex::Regex; use hyper::Method; use hyper::server::Request; use context::{Context, Params, ParamType}; type Handler = fn(Context) -> ::Response; pub struct Router { pub routes: Vec<Route> } impl Router { pub fn new() -> Router { Router { routes: Vec::new() } } pub fn get(&mut self, path: &str, handler: Handler) { self.routes.push( Route::new(Method::Get, path, handler) ); } } #[derive(Clone)] pub struct Route { pub method: Method, pub path: RoutePath, pub handler: Handler } impl Route { pub fn new(method: Method, path: &str, handler: Handler) -> Route { Route { method: method, path: RoutePath::new(path), handler: handler } } pub fn matches_request(&self, request: &Request) -> Option<Params> { if self.method!= *request.method() { return None; } return self.path.matches_path(request.path()); } } #[derive(Clone, PartialEq, Debug)] pub enum PathToken { Str(String), Var { key: String, datatype: String } } #[derive(Clone, Debug, PartialEq)] pub struct RoutePath { pub tokenized_path: Vec<PathToken> } impl RoutePath { pub fn new(path: &str) -> Self { let vec_path = RoutePath::tokenize_path(path); RoutePath { tokenized_path: vec_path } } fn tokenize_path(path: &str) -> Vec<PathToken> { let path = &path[1..]; // Remove root let re = Regex::new(r"^\{([a-zA-Z_]+)\}$").unwrap(); let path_vec = path.split("/") .map(|t| { if re.is_match(t) { // Capture the variable name between {} let cap = re.captures(t).unwrap(); // There should be only one, grab it as str let key = cap.get(1).unwrap().as_str(); return PathToken::Var { key: String::from(key), datatype: String::from("string") } } PathToken::Str(String::from(t)) }) .collect::<Vec<PathToken>>(); path_vec } pub fn matches_path(&self, request_path: &str) -> Option<Params> { // Remove /, split on /, into vec of Strings let incoming_path = &request_path[1..].split("/").map(|i| { String::from(i) }).collect::<Vec<String>>(); // Both RoutePath and Request should have equal length tokenized paths if self.tokenized_path.len()!= incoming_path.len() { return None; } // Save url params while processing let mut params = Params::new(); for (index, token) in self.tokenized_path.iter().enumerate() { match token { &PathToken::Str(ref s) => { if *s!= incoming_path[index] { return None } }, &PathToken::Var {ref key, ref datatype} => { if datatype == "string" { params.insert( key.to_string(), ParamType::Str(incoming_path[index].to_string()) ); } } } } Some(params) } } #[cfg(test)] mod tests { use std::str::FromStr; use hyper::{Request, Response, Method, Uri}; use super::{Route, RoutePath, PathToken, Context}; fn generic_handler(_context: Context) -> Response { return Response::new(); } #[test] fn routepath_for_root() { let routepath = RoutePath::new("/"); assert_eq!(routepath.tokenized_path, vec![PathToken::Str(String::from(""))]); } #[test] fn routepath_for_user_profile() { let routepath = RoutePath::new("/user/profile"); assert_eq!(routepath.tokenized_path, vec![ PathToken::Str(String::from("user")), PathToken::Str(String::from("profile")) ] ) } #[test] fn route_should_be_matched() { let route = Route::new(Method::Get, "/monkeys", generic_handler); let path = Uri::from_str("http://example.com/monkeys").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.matches_request(&request).is_some()); } #[test] fn
() { let route = Route::new(Method::Get, "/monkeys", generic_handler); let path = Uri::from_str("http://example.com/nomatch").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.matches_request(&request).is_none()); } #[test] fn route_should_match_with_variables() { let route = Route::new(Method::Get, "/user/{username}", generic_handler); let path = Uri::from_str("http://example.com/user/johndoe").unwrap(); let request: Request = Request::new(Method::Get, path); assert!(route.path.matches_path(request.path()).is_some()); assert!(route.matches_request(&request).is_some()); } }
route_should_not_be_matched
identifier_name
mod.rs
//! HTTP Server //! //! # Server //! //! A `Server` is created to listen on port, parse HTTP requests, and hand //! them off to a `Handler`. By default, the Server will listen across multiple //! threads, but that can be configured to a single thread if preferred. //! //! # Handling requests //! //! You must pass a `Handler` to the Server that will handle requests. There is //! a default implementation for `fn`s and closures, allowing you pass one of //! those easily. //! //! //! ```no_run //! use hyper::server::{Server, Request, Response}; //! //! fn hello(req: Request, res: Response) { //! // handle things here //! } //! //! Server::http("0.0.0.0:0").unwrap().handle(hello).unwrap(); //! ``` //! //! As with any trait, you can also define a struct and implement `Handler` //! directly on your own type, and pass that to the `Server` instead. //! //! ```no_run //! use std::sync::Mutex; //! use std::sync::mpsc::{channel, Sender}; //! use hyper::server::{Handler, Server, Request, Response}; //! //! struct SenderHandler { //! sender: Mutex<Sender<&'static str>> //! } //! //! impl Handler for SenderHandler { //! fn handle(&self, req: Request, res: Response) { //! self.sender.lock().unwrap().send("start").unwrap(); //! } //! } //! //! //! let (tx, rx) = channel(); //! Server::http("0.0.0.0:0").unwrap().handle(SenderHandler { //! sender: Mutex::new(tx) //! }).unwrap(); //! ``` //! //! Since the `Server` will be listening on multiple threads, the `Handler` //! must implement `Sync`: any mutable state must be synchronized. //! //! ```no_run //! use std::sync::atomic::{AtomicUsize, Ordering}; //! use hyper::server::{Server, Request, Response}; //! //! let counter = AtomicUsize::new(0); //! Server::http("0.0.0.0:0").unwrap().handle(move |req: Request, res: Response| { //! counter.fetch_add(1, Ordering::Relaxed); //! }).unwrap(); //! ``` //! //! # The `Request` and `Response` pair //! //! A `Handler` receives a pair of arguments, a `Request` and a `Response`. The //! `Request` includes access to the `method`, `uri`, and `headers` of the //! incoming HTTP request. It also implements `std::io::Read`, in order to //! read any body, such as with `POST` or `PUT` messages. //! //! Likewise, the `Response` includes ways to set the `status` and `headers`, //! and implements `std::io::Write` to allow writing the response body. //! //! ```no_run //! use std::io; //! use hyper::server::{Server, Request, Response}; //! use hyper::status::StatusCode; //! //! Server::http("0.0.0.0:0").unwrap().handle(|mut req: Request, mut res: Response| { //! match req.method { //! hyper::Post => { //! io::copy(&mut req, &mut res.start().unwrap()).unwrap(); //! }, //! _ => *res.status_mut() = StatusCode::MethodNotAllowed //! } //! }).unwrap(); //! ``` //! //! ## An aside: Write Status //! //! The `Response` uses a phantom type parameter to determine its write status. //! What does that mean? In short, it ensures you never write a body before //! adding all headers, and never add a header after writing some of the body. //! //! This is often done in most implementations by include a boolean property //! on the response, such as `headers_written`, checking that each time the //! body has something to write, so as to make sure the headers are sent once, //! and only once. But this has 2 downsides: //! //! 1. You are typically never notified that your late header is doing nothing. //! 2. There's a runtime cost to checking on every write. //! //! Instead, hyper handles this statically, or at compile-time. A //! 
`Response<Fresh>` includes a `headers_mut()` method, allowing you add more //! headers. It also does not implement `Write`, so you can't accidentally //! write early. Once the "head" of the response is correct, you can "send" it //! out by calling `start` on the `Response<Fresh>`. This will return a new //! `Response<Streaming>` object, that no longer has `headers_mut()`, but does //! implement `Write`. use std::fmt; use std::io::{self, ErrorKind, BufWriter, Write}; use std::net::{SocketAddr, ToSocketAddrs}; use std::thread::{self, JoinHandle}; use std::time::Duration; use num_cpus; pub use self::request::Request; pub use self::response::Response; pub use net::{Fresh, Streaming}; use Error; use buffer::BufReader; use header::{Headers, Expect, Connection}; use http; use method::Method; use net::{NetworkListener, NetworkStream, HttpListener, HttpsListener, Ssl}; use status::StatusCode; use uri::RequestUri; use version::HttpVersion::Http11; use self::listener::ListenerPool; pub mod request; pub mod response; mod listener; /// A server can listen on a TCP socket. /// /// Once listening, it will create a `Request`/`Response` pair for each /// incoming connection, and hand them to the provided handler. #[derive(Debug)] pub struct Server<L = HttpListener> { listener: L, timeouts: Timeouts, } #[derive(Clone, Copy, Default, Debug)] struct Timeouts { read: Option<Duration>, write: Option<Duration>, keep_alive: Option<Duration>, } macro_rules! try_option( ($e:expr) => {{ match $e { Some(v) => v, None => return None } }} ); impl<L: NetworkListener> Server<L> { /// Creates a new server with the provided handler. #[inline] pub fn new(listener: L) -> Server<L> { Server { listener: listener, timeouts: Timeouts::default(), } } /// Enables keep-alive for this server. /// /// The timeout duration passed will be used to determine how long /// to keep the connection alive before dropping it. /// /// **NOTE**: The timeout will only be used when the `timeouts` feature /// is enabled for hyper, and rustc is 1.4 or greater. #[inline] pub fn keep_alive(&mut self, timeout: Duration) { self.timeouts.keep_alive = Some(timeout); } #[cfg(feature = "timeouts")] pub fn set_read_timeout(&mut self, dur: Option<Duration>) { self.timeouts.read = dur; } #[cfg(feature = "timeouts")] pub fn set_write_timeout(&mut self, dur: Option<Duration>) { self.timeouts.write = dur; } } impl Server<HttpListener> { /// Creates a new server that will handle `HttpStream`s. pub fn http<To: ToSocketAddrs>(addr: To) -> ::Result<Server<HttpListener>> { HttpListener::new(addr).map(Server::new) } } impl<S: Ssl + Clone + Send> Server<HttpsListener<S>> { /// Creates a new server that will handle `HttpStream`s over SSL. /// /// You can use any SSL implementation, as long as implements `hyper::net::Ssl`. pub fn https<A: ToSocketAddrs>(addr: A, ssl: S) -> ::Result<Server<HttpsListener<S>>> { HttpsListener::new(addr, ssl).map(Server::new) } } impl<L: NetworkListener + Send +'static> Server<L> { /// Binds to a socket and starts handling connections. pub fn handle<H: Handler +'static>(self, handler: H) -> ::Result<Listening> { self.handle_threads(handler, num_cpus::get() * 5 / 4) } /// Binds to a socket and starts handling connections with the provided /// number of threads. 
pub fn handle_threads<H: Handler +'static>(self, handler: H, threads: usize) -> ::Result<Listening> { handle(self, handler, threads) } } fn handle<H, L>(mut server: Server<L>, handler: H, threads: usize) -> ::Result<Listening> where H: Handler +'static, L: NetworkListener + Send +'static { let socket = try!(server.listener.local_addr()); debug!("threads = {:?}", threads); let pool = ListenerPool::new(server.listener); let worker = Worker::new(handler, server.timeouts); let work = move |mut stream| worker.handle_connection(&mut stream); let guard = thread::spawn(move || pool.accept(work, threads)); Ok(Listening { _guard: Some(guard), socket: socket, }) } struct Worker<H: Handler +'static> { handler: H, timeouts: Timeouts, } impl<H: Handler +'static> Worker<H> { fn new(handler: H, timeouts: Timeouts) -> Worker<H> { Worker { handler: handler, timeouts: timeouts, } } fn handle_connection<S>(&self, mut stream: &mut S) where S: NetworkStream + Clone { debug!("Incoming stream"); self.handler.on_connection_start(); if let Err(e) = self.set_timeouts(&*stream) { error!("set_timeouts error: {:?}", e); return; } let addr = match stream.peer_addr() { Ok(addr) => addr, Err(e) => { error!("Peer Name error: {:?}", e); return; } }; // FIXME: Use Type ascription let stream_clone: &mut NetworkStream = &mut stream.clone(); let mut rdr = BufReader::new(stream_clone); let mut wrt = BufWriter::new(stream); while self.keep_alive_loop(&mut rdr, &mut wrt, addr) { if let Err(e) = self.set_read_timeout(*rdr.get_ref(), self.timeouts.keep_alive) { error!("set_read_timeout keep_alive {:?}", e); break; } } self.handler.on_connection_end(); debug!("keep_alive loop ending for {}", addr); } fn set_timeouts(&self, s: &NetworkStream) -> io::Result<()> { try!(self.set_read_timeout(s, self.timeouts.read)); self.set_write_timeout(s, self.timeouts.write) } #[cfg(not(feature = "timeouts"))] fn set_write_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_write_timeout(&self, s: &NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_write_timeout(timeout) } #[cfg(not(feature = "timeouts"))] fn set_read_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_read_timeout(&self, s: &NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_read_timeout(timeout) } fn keep_alive_loop<W: Write>(&self, mut rdr: &mut BufReader<&mut NetworkStream>, wrt: &mut W, addr: SocketAddr) -> bool { let req = match Request::new(rdr, addr) { Ok(req) => req, Err(Error::Io(ref e)) if e.kind() == ErrorKind::ConnectionAborted => { trace!("tcp closed, cancelling keep-alive loop"); return false; } Err(Error::Io(e)) => { debug!("ioerror in keepalive loop = {:?}", e); return false; } Err(e) => { //TODO: send a 400 response error!("request error = {:?}", e); return false; } }; if!self.handle_expect(&req, wrt) { return false; } if let Err(e) = req.set_read_timeout(self.timeouts.read) { error!("set_read_timeout {:?}", e); return false; } let mut keep_alive = self.timeouts.keep_alive.is_some() && http::should_keep_alive(req.version, &req.headers); let version = req.version; let mut res_headers = Headers::new(); if!keep_alive { res_headers.set(Connection::close()); } { let mut res = Response::new(wrt, &mut res_headers); res.version = version; self.handler.handle(req, res); } // if the request was keep-alive, we need to check that the server agrees // if it wasn't, then the server cannot force it 
to be true anyways if keep_alive { keep_alive = http::should_keep_alive(version, &res_headers); } debug!("keep_alive = {:?} for {}", keep_alive, addr); keep_alive } fn handle_expect<W: Write>(&self, req: &Request, wrt: &mut W) -> bool { if req.version == Http11 && req.headers.get() == Some(&Expect::Continue) { let status = self.handler.check_continue((&req.method, &req.uri, &req.headers)); match write!(wrt, "{} {}\r\n\r\n", Http11, status) { Ok(..) => (), Err(e) => { error!("error writing 100-continue: {:?}", e); return false; } } if status!= StatusCode::Continue { debug!("non-100 status ({}) for Expect 100 request", status); return false; } } true } } /// A listening server, which can later be closed. pub struct Listening { _guard: Option<JoinHandle<()>>, /// The socket addresses that the server is bound to. pub socket: SocketAddr, } impl fmt::Debug for Listening { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Listening {{ socket: {:?} }}", self.socket) } } impl Drop for Listening { fn drop(&mut self)
} impl Listening { /// Stop the server from listening to its socket address. pub fn close(&mut self) -> ::Result<()> { let _ = self._guard.take(); debug!("closing server"); Ok(()) } } /// A handler that can handle incoming requests for a server. pub trait Handler: Sync + Send { /// Receives a `Request`/`Response` pair, and should perform some action on them. /// /// This could reading from the request, and writing to the response. fn handle<'a, 'k>(&'a self, Request<'a, 'k>, Response<'a, Fresh>); /// Called when a Request includes a `Expect: 100-continue` header. /// /// By default, this will always immediately response with a `StatusCode::Continue`, /// but can be overridden with custom behavior. fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::Continue } /// This is run after a connection is received, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_start(&self) { } /// This is run before a connection is closed, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_end(&self) { } } impl<F> Handler for F where F: Fn(Request, Response<Fresh>), F: Sync + Send { fn handle<'a, 'k>(&'a self, req: Request<'a, 'k>, res: Response<'a, Fresh>) { self(req, res) } } #[cfg(test)] mod tests { use header::Headers; use method::Method; use mock::MockStream; use status::StatusCode; use uri::RequestUri; use super::{Request, Response, Fresh, Handler, Worker}; #[test] fn test_check_continue_default() { let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); fn handle(_: Request, res: Response<Fresh>) { res.start().unwrap().end().unwrap(); } Worker::new(handle, Default::default()).handle_connection(&mut mock); let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; assert_eq!(&mock.write[..cont.len()], cont); let res = b"HTTP/1.1 200 OK\r\n"; assert_eq!(&mock.write[cont.len()..cont.len() + res.len()], res); } #[test] fn test_check_continue_reject() { struct Reject; impl Handler for Reject { fn handle<'a, 'k>(&'a self, _: Request<'a, 'k>, res: Response<'a, Fresh>) { res.start().unwrap().end().unwrap(); } fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::ExpectationFailed } } let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); Worker::new(Reject, Default::default()).handle_connection(&mut mock); assert_eq!(mock.write, &b"HTTP/1.1 417 Expectation Failed\r\n\r\n"[..]); } }
{ let _ = self._guard.take().map(|g| g.join()); }
identifier_body
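The identifier_body above is the Drop impl for Listening: join the worker thread's JoinHandle when the value is dropped, unless close() has already taken the handle out of the Option. A small, std::thread-only sketch of that pattern follows; this Listening and close() are illustrative re-creations under that assumption, not hyper's actual server types.

use std::thread::{self, JoinHandle};

// Hold the worker thread in an Option so close() can "detach" it before drop runs.
struct Listening {
    guard: Option<JoinHandle<()>>,
}

impl Listening {
    fn close(&mut self) {
        // Taking the handle without joining means the later drop has nothing to wait on.
        let _ = self.guard.take();
    }
}

impl Drop for Listening {
    fn drop(&mut self) {
        // Same shape as the identifier_body filled in above: join if still present.
        let _ = self.guard.take().map(|g| g.join());
    }
}

fn main() {
    // Dropping without close() blocks until the worker thread has finished.
    let joined = Listening {
        guard: Some(thread::spawn(|| { /* pretend to accept connections */ })),
    };
    drop(joined);

    // close() takes the handle first, so the subsequent drop does not join.
    let mut detached = Listening {
        guard: Some(thread::spawn(|| {})),
    };
    detached.close();
}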
mod.rs
//! HTTP Server //! //! # Server //! //! A `Server` is created to listen on port, parse HTTP requests, and hand //! them off to a `Handler`. By default, the Server will listen across multiple //! threads, but that can be configured to a single thread if preferred. //! //! # Handling requests //! //! You must pass a `Handler` to the Server that will handle requests. There is //! a default implementation for `fn`s and closures, allowing you pass one of //! those easily. //! //! //! ```no_run //! use hyper::server::{Server, Request, Response}; //! //! fn hello(req: Request, res: Response) { //! // handle things here //! } //! //! Server::http("0.0.0.0:0").unwrap().handle(hello).unwrap(); //! ``` //! //! As with any trait, you can also define a struct and implement `Handler` //! directly on your own type, and pass that to the `Server` instead. //! //! ```no_run //! use std::sync::Mutex; //! use std::sync::mpsc::{channel, Sender}; //! use hyper::server::{Handler, Server, Request, Response}; //! //! struct SenderHandler { //! sender: Mutex<Sender<&'static str>> //! } //! //! impl Handler for SenderHandler { //! fn handle(&self, req: Request, res: Response) { //! self.sender.lock().unwrap().send("start").unwrap(); //! } //! } //! //! //! let (tx, rx) = channel(); //! Server::http("0.0.0.0:0").unwrap().handle(SenderHandler { //! sender: Mutex::new(tx) //! }).unwrap(); //! ``` //! //! Since the `Server` will be listening on multiple threads, the `Handler` //! must implement `Sync`: any mutable state must be synchronized. //! //! ```no_run //! use std::sync::atomic::{AtomicUsize, Ordering}; //! use hyper::server::{Server, Request, Response}; //! //! let counter = AtomicUsize::new(0); //! Server::http("0.0.0.0:0").unwrap().handle(move |req: Request, res: Response| { //! counter.fetch_add(1, Ordering::Relaxed); //! }).unwrap(); //! ``` //! //! # The `Request` and `Response` pair //! //! A `Handler` receives a pair of arguments, a `Request` and a `Response`. The //! `Request` includes access to the `method`, `uri`, and `headers` of the //! incoming HTTP request. It also implements `std::io::Read`, in order to //! read any body, such as with `POST` or `PUT` messages. //! //! Likewise, the `Response` includes ways to set the `status` and `headers`, //! and implements `std::io::Write` to allow writing the response body. //! //! ```no_run //! use std::io; //! use hyper::server::{Server, Request, Response}; //! use hyper::status::StatusCode; //! //! Server::http("0.0.0.0:0").unwrap().handle(|mut req: Request, mut res: Response| { //! match req.method { //! hyper::Post => { //! io::copy(&mut req, &mut res.start().unwrap()).unwrap(); //! }, //! _ => *res.status_mut() = StatusCode::MethodNotAllowed //! } //! }).unwrap(); //! ``` //! //! ## An aside: Write Status //! //! The `Response` uses a phantom type parameter to determine its write status. //! What does that mean? In short, it ensures you never write a body before //! adding all headers, and never add a header after writing some of the body. //! //! This is often done in most implementations by include a boolean property //! on the response, such as `headers_written`, checking that each time the //! body has something to write, so as to make sure the headers are sent once, //! and only once. But this has 2 downsides: //! //! 1. You are typically never notified that your late header is doing nothing. //! 2. There's a runtime cost to checking on every write. //! //! Instead, hyper handles this statically, or at compile-time. A //! 
`Response<Fresh>` includes a `headers_mut()` method, allowing you add more //! headers. It also does not implement `Write`, so you can't accidentally //! write early. Once the "head" of the response is correct, you can "send" it //! out by calling `start` on the `Response<Fresh>`. This will return a new //! `Response<Streaming>` object, that no longer has `headers_mut()`, but does //! implement `Write`. use std::fmt; use std::io::{self, ErrorKind, BufWriter, Write}; use std::net::{SocketAddr, ToSocketAddrs}; use std::thread::{self, JoinHandle}; use std::time::Duration; use num_cpus; pub use self::request::Request; pub use self::response::Response; pub use net::{Fresh, Streaming}; use Error; use buffer::BufReader; use header::{Headers, Expect, Connection}; use http; use method::Method; use net::{NetworkListener, NetworkStream, HttpListener, HttpsListener, Ssl}; use status::StatusCode; use uri::RequestUri; use version::HttpVersion::Http11; use self::listener::ListenerPool; pub mod request; pub mod response; mod listener; /// A server can listen on a TCP socket. /// /// Once listening, it will create a `Request`/`Response` pair for each /// incoming connection, and hand them to the provided handler. #[derive(Debug)] pub struct Server<L = HttpListener> { listener: L, timeouts: Timeouts, } #[derive(Clone, Copy, Default, Debug)] struct Timeouts { read: Option<Duration>, write: Option<Duration>, keep_alive: Option<Duration>, } macro_rules! try_option( ($e:expr) => {{ match $e { Some(v) => v, None => return None } }} ); impl<L: NetworkListener> Server<L> { /// Creates a new server with the provided handler. #[inline] pub fn new(listener: L) -> Server<L> { Server { listener: listener, timeouts: Timeouts::default(), } } /// Enables keep-alive for this server. /// /// The timeout duration passed will be used to determine how long /// to keep the connection alive before dropping it. /// /// **NOTE**: The timeout will only be used when the `timeouts` feature /// is enabled for hyper, and rustc is 1.4 or greater. #[inline] pub fn keep_alive(&mut self, timeout: Duration) { self.timeouts.keep_alive = Some(timeout); } #[cfg(feature = "timeouts")] pub fn set_read_timeout(&mut self, dur: Option<Duration>) { self.timeouts.read = dur; } #[cfg(feature = "timeouts")] pub fn set_write_timeout(&mut self, dur: Option<Duration>) { self.timeouts.write = dur; } } impl Server<HttpListener> { /// Creates a new server that will handle `HttpStream`s. pub fn http<To: ToSocketAddrs>(addr: To) -> ::Result<Server<HttpListener>> { HttpListener::new(addr).map(Server::new) } } impl<S: Ssl + Clone + Send> Server<HttpsListener<S>> { /// Creates a new server that will handle `HttpStream`s over SSL. /// /// You can use any SSL implementation, as long as implements `hyper::net::Ssl`. pub fn https<A: ToSocketAddrs>(addr: A, ssl: S) -> ::Result<Server<HttpsListener<S>>> { HttpsListener::new(addr, ssl).map(Server::new) } } impl<L: NetworkListener + Send +'static> Server<L> { /// Binds to a socket and starts handling connections. pub fn handle<H: Handler +'static>(self, handler: H) -> ::Result<Listening> { self.handle_threads(handler, num_cpus::get() * 5 / 4) } /// Binds to a socket and starts handling connections with the provided /// number of threads. 
pub fn handle_threads<H: Handler +'static>(self, handler: H, threads: usize) -> ::Result<Listening> { handle(self, handler, threads) } } fn handle<H, L>(mut server: Server<L>, handler: H, threads: usize) -> ::Result<Listening> where H: Handler +'static, L: NetworkListener + Send +'static { let socket = try!(server.listener.local_addr()); debug!("threads = {:?}", threads); let pool = ListenerPool::new(server.listener); let worker = Worker::new(handler, server.timeouts); let work = move |mut stream| worker.handle_connection(&mut stream); let guard = thread::spawn(move || pool.accept(work, threads)); Ok(Listening { _guard: Some(guard), socket: socket, }) } struct Worker<H: Handler +'static> { handler: H, timeouts: Timeouts, } impl<H: Handler +'static> Worker<H> { fn new(handler: H, timeouts: Timeouts) -> Worker<H> { Worker { handler: handler, timeouts: timeouts, } } fn handle_connection<S>(&self, mut stream: &mut S) where S: NetworkStream + Clone { debug!("Incoming stream"); self.handler.on_connection_start(); if let Err(e) = self.set_timeouts(&*stream) { error!("set_timeouts error: {:?}", e); return; } let addr = match stream.peer_addr() { Ok(addr) => addr, Err(e) => { error!("Peer Name error: {:?}", e); return; } }; // FIXME: Use Type ascription let stream_clone: &mut NetworkStream = &mut stream.clone(); let mut rdr = BufReader::new(stream_clone); let mut wrt = BufWriter::new(stream); while self.keep_alive_loop(&mut rdr, &mut wrt, addr) { if let Err(e) = self.set_read_timeout(*rdr.get_ref(), self.timeouts.keep_alive)
} self.handler.on_connection_end(); debug!("keep_alive loop ending for {}", addr); } fn set_timeouts(&self, s: &NetworkStream) -> io::Result<()> { try!(self.set_read_timeout(s, self.timeouts.read)); self.set_write_timeout(s, self.timeouts.write) } #[cfg(not(feature = "timeouts"))] fn set_write_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_write_timeout(&self, s: &NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_write_timeout(timeout) } #[cfg(not(feature = "timeouts"))] fn set_read_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_read_timeout(&self, s: &NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_read_timeout(timeout) } fn keep_alive_loop<W: Write>(&self, mut rdr: &mut BufReader<&mut NetworkStream>, wrt: &mut W, addr: SocketAddr) -> bool { let req = match Request::new(rdr, addr) { Ok(req) => req, Err(Error::Io(ref e)) if e.kind() == ErrorKind::ConnectionAborted => { trace!("tcp closed, cancelling keep-alive loop"); return false; } Err(Error::Io(e)) => { debug!("ioerror in keepalive loop = {:?}", e); return false; } Err(e) => { //TODO: send a 400 response error!("request error = {:?}", e); return false; } }; if!self.handle_expect(&req, wrt) { return false; } if let Err(e) = req.set_read_timeout(self.timeouts.read) { error!("set_read_timeout {:?}", e); return false; } let mut keep_alive = self.timeouts.keep_alive.is_some() && http::should_keep_alive(req.version, &req.headers); let version = req.version; let mut res_headers = Headers::new(); if!keep_alive { res_headers.set(Connection::close()); } { let mut res = Response::new(wrt, &mut res_headers); res.version = version; self.handler.handle(req, res); } // if the request was keep-alive, we need to check that the server agrees // if it wasn't, then the server cannot force it to be true anyways if keep_alive { keep_alive = http::should_keep_alive(version, &res_headers); } debug!("keep_alive = {:?} for {}", keep_alive, addr); keep_alive } fn handle_expect<W: Write>(&self, req: &Request, wrt: &mut W) -> bool { if req.version == Http11 && req.headers.get() == Some(&Expect::Continue) { let status = self.handler.check_continue((&req.method, &req.uri, &req.headers)); match write!(wrt, "{} {}\r\n\r\n", Http11, status) { Ok(..) => (), Err(e) => { error!("error writing 100-continue: {:?}", e); return false; } } if status!= StatusCode::Continue { debug!("non-100 status ({}) for Expect 100 request", status); return false; } } true } } /// A listening server, which can later be closed. pub struct Listening { _guard: Option<JoinHandle<()>>, /// The socket addresses that the server is bound to. pub socket: SocketAddr, } impl fmt::Debug for Listening { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Listening {{ socket: {:?} }}", self.socket) } } impl Drop for Listening { fn drop(&mut self) { let _ = self._guard.take().map(|g| g.join()); } } impl Listening { /// Stop the server from listening to its socket address. pub fn close(&mut self) -> ::Result<()> { let _ = self._guard.take(); debug!("closing server"); Ok(()) } } /// A handler that can handle incoming requests for a server. pub trait Handler: Sync + Send { /// Receives a `Request`/`Response` pair, and should perform some action on them. /// /// This could reading from the request, and writing to the response. 
fn handle<'a, 'k>(&'a self, Request<'a, 'k>, Response<'a, Fresh>); /// Called when a Request includes a `Expect: 100-continue` header. /// /// By default, this will always immediately response with a `StatusCode::Continue`, /// but can be overridden with custom behavior. fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::Continue } /// This is run after a connection is received, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_start(&self) { } /// This is run before a connection is closed, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_end(&self) { } } impl<F> Handler for F where F: Fn(Request, Response<Fresh>), F: Sync + Send { fn handle<'a, 'k>(&'a self, req: Request<'a, 'k>, res: Response<'a, Fresh>) { self(req, res) } } #[cfg(test)] mod tests { use header::Headers; use method::Method; use mock::MockStream; use status::StatusCode; use uri::RequestUri; use super::{Request, Response, Fresh, Handler, Worker}; #[test] fn test_check_continue_default() { let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); fn handle(_: Request, res: Response<Fresh>) { res.start().unwrap().end().unwrap(); } Worker::new(handle, Default::default()).handle_connection(&mut mock); let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; assert_eq!(&mock.write[..cont.len()], cont); let res = b"HTTP/1.1 200 OK\r\n"; assert_eq!(&mock.write[cont.len()..cont.len() + res.len()], res); } #[test] fn test_check_continue_reject() { struct Reject; impl Handler for Reject { fn handle<'a, 'k>(&'a self, _: Request<'a, 'k>, res: Response<'a, Fresh>) { res.start().unwrap().end().unwrap(); } fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::ExpectationFailed } } let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); Worker::new(Reject, Default::default()).handle_connection(&mut mock); assert_eq!(mock.write, &b"HTTP/1.1 417 Expectation Failed\r\n\r\n"[..]); } }
{ error!("set_read_timeout keep_alive {:?}", e); break; }
conditional_block
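The conditional_block above sits inside Worker::handle_connection's keep-alive loop: keep serving requests on one connection while keep_alive_loop returns true, and if the read timeout cannot be re-armed between requests, log the error and break out. Below is a simplified, dependency-free skeleton of that loop shape; Conn, serve_one and set_read_timeout are invented stand-ins for the real NetworkStream-based methods.

use std::time::Duration;

// Toy connection that stays alive for a fixed number of requests.
struct Conn {
    remaining: u32,
}

// Returns true while the connection should be kept alive (stands in for keep_alive_loop).
fn serve_one(conn: &mut Conn) -> bool {
    conn.remaining = conn.remaining.saturating_sub(1);
    conn.remaining > 0
}

// Stand-in for re-arming the socket read timeout between requests.
fn set_read_timeout(_conn: &Conn, _timeout: Option<Duration>) -> Result<(), String> {
    Ok(())
}

fn handle_connection(mut conn: Conn, keep_alive: Option<Duration>) {
    while serve_one(&mut conn) {
        // Mirrors the conditional block filled in above: log and stop looping
        // if the keep-alive timeout cannot be re-applied.
        if let Err(e) = set_read_timeout(&conn, keep_alive) {
            eprintln!("set_read_timeout keep_alive {:?}", e);
            break;
        }
    }
}

fn main() {
    handle_connection(Conn { remaining: 3 }, Some(Duration::from_secs(5)));
}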
mod.rs
//! HTTP Server //! //! # Server //! //! A `Server` is created to listen on port, parse HTTP requests, and hand //! them off to a `Handler`. By default, the Server will listen across multiple //! threads, but that can be configured to a single thread if preferred. //! //! # Handling requests //! //! You must pass a `Handler` to the Server that will handle requests. There is //! a default implementation for `fn`s and closures, allowing you pass one of //! those easily. //! //! //! ```no_run //! use hyper::server::{Server, Request, Response}; //! //! fn hello(req: Request, res: Response) { //! // handle things here //! } //! //! Server::http("0.0.0.0:0").unwrap().handle(hello).unwrap(); //! ``` //! //! As with any trait, you can also define a struct and implement `Handler` //! directly on your own type, and pass that to the `Server` instead. //! //! ```no_run //! use std::sync::Mutex; //! use std::sync::mpsc::{channel, Sender}; //! use hyper::server::{Handler, Server, Request, Response}; //! //! struct SenderHandler { //! sender: Mutex<Sender<&'static str>> //! } //! //! impl Handler for SenderHandler { //! fn handle(&self, req: Request, res: Response) { //! self.sender.lock().unwrap().send("start").unwrap(); //! } //! } //! //! //! let (tx, rx) = channel(); //! Server::http("0.0.0.0:0").unwrap().handle(SenderHandler { //! sender: Mutex::new(tx) //! }).unwrap(); //! ``` //! //! Since the `Server` will be listening on multiple threads, the `Handler` //! must implement `Sync`: any mutable state must be synchronized. //! //! ```no_run //! use std::sync::atomic::{AtomicUsize, Ordering}; //! use hyper::server::{Server, Request, Response}; //! //! let counter = AtomicUsize::new(0); //! Server::http("0.0.0.0:0").unwrap().handle(move |req: Request, res: Response| { //! counter.fetch_add(1, Ordering::Relaxed); //! }).unwrap(); //! ``` //! //! # The `Request` and `Response` pair //! //! A `Handler` receives a pair of arguments, a `Request` and a `Response`. The //! `Request` includes access to the `method`, `uri`, and `headers` of the //! incoming HTTP request. It also implements `std::io::Read`, in order to //! read any body, such as with `POST` or `PUT` messages. //! //! Likewise, the `Response` includes ways to set the `status` and `headers`, //! and implements `std::io::Write` to allow writing the response body. //! //! ```no_run //! use std::io; //! use hyper::server::{Server, Request, Response}; //! use hyper::status::StatusCode; //! //! Server::http("0.0.0.0:0").unwrap().handle(|mut req: Request, mut res: Response| { //! match req.method { //! hyper::Post => { //! io::copy(&mut req, &mut res.start().unwrap()).unwrap(); //! }, //! _ => *res.status_mut() = StatusCode::MethodNotAllowed //! } //! }).unwrap(); //! ``` //! //! ## An aside: Write Status //! //! The `Response` uses a phantom type parameter to determine its write status. //! What does that mean? In short, it ensures you never write a body before //! adding all headers, and never add a header after writing some of the body. //! //! This is often done in most implementations by include a boolean property //! on the response, such as `headers_written`, checking that each time the //! body has something to write, so as to make sure the headers are sent once, //! and only once. But this has 2 downsides: //! //! 1. You are typically never notified that your late header is doing nothing. //! 2. There's a runtime cost to checking on every write. //! //! Instead, hyper handles this statically, or at compile-time. A //! 
`Response<Fresh>` includes a `headers_mut()` method, allowing you add more //! headers. It also does not implement `Write`, so you can't accidentally //! write early. Once the "head" of the response is correct, you can "send" it //! out by calling `start` on the `Response<Fresh>`. This will return a new //! `Response<Streaming>` object, that no longer has `headers_mut()`, but does //! implement `Write`. use std::fmt; use std::io::{self, ErrorKind, BufWriter, Write}; use std::net::{SocketAddr, ToSocketAddrs}; use std::thread::{self, JoinHandle}; use std::time::Duration; use num_cpus; pub use self::request::Request; pub use self::response::Response; pub use net::{Fresh, Streaming}; use Error; use buffer::BufReader; use header::{Headers, Expect, Connection}; use http; use method::Method; use net::{NetworkListener, NetworkStream, HttpListener, HttpsListener, Ssl}; use status::StatusCode; use uri::RequestUri; use version::HttpVersion::Http11; use self::listener::ListenerPool; pub mod request; pub mod response; mod listener; /// A server can listen on a TCP socket. /// /// Once listening, it will create a `Request`/`Response` pair for each /// incoming connection, and hand them to the provided handler. #[derive(Debug)] pub struct Server<L = HttpListener> { listener: L, timeouts: Timeouts, } #[derive(Clone, Copy, Default, Debug)] struct Timeouts { read: Option<Duration>, write: Option<Duration>, keep_alive: Option<Duration>, } macro_rules! try_option( ($e:expr) => {{ match $e { Some(v) => v, None => return None } }} ); impl<L: NetworkListener> Server<L> { /// Creates a new server with the provided handler. #[inline] pub fn new(listener: L) -> Server<L> { Server { listener: listener, timeouts: Timeouts::default(), } } /// Enables keep-alive for this server. /// /// The timeout duration passed will be used to determine how long /// to keep the connection alive before dropping it. /// /// **NOTE**: The timeout will only be used when the `timeouts` feature /// is enabled for hyper, and rustc is 1.4 or greater. #[inline] pub fn
(&mut self, timeout: Duration) { self.timeouts.keep_alive = Some(timeout); } #[cfg(feature = "timeouts")] pub fn set_read_timeout(&mut self, dur: Option<Duration>) { self.timeouts.read = dur; } #[cfg(feature = "timeouts")] pub fn set_write_timeout(&mut self, dur: Option<Duration>) { self.timeouts.write = dur; } } impl Server<HttpListener> { /// Creates a new server that will handle `HttpStream`s. pub fn http<To: ToSocketAddrs>(addr: To) -> ::Result<Server<HttpListener>> { HttpListener::new(addr).map(Server::new) } } impl<S: Ssl + Clone + Send> Server<HttpsListener<S>> { /// Creates a new server that will handle `HttpStream`s over SSL. /// /// You can use any SSL implementation, as long as implements `hyper::net::Ssl`. pub fn https<A: ToSocketAddrs>(addr: A, ssl: S) -> ::Result<Server<HttpsListener<S>>> { HttpsListener::new(addr, ssl).map(Server::new) } } impl<L: NetworkListener + Send +'static> Server<L> { /// Binds to a socket and starts handling connections. pub fn handle<H: Handler +'static>(self, handler: H) -> ::Result<Listening> { self.handle_threads(handler, num_cpus::get() * 5 / 4) } /// Binds to a socket and starts handling connections with the provided /// number of threads. pub fn handle_threads<H: Handler +'static>(self, handler: H, threads: usize) -> ::Result<Listening> { handle(self, handler, threads) } } fn handle<H, L>(mut server: Server<L>, handler: H, threads: usize) -> ::Result<Listening> where H: Handler +'static, L: NetworkListener + Send +'static { let socket = try!(server.listener.local_addr()); debug!("threads = {:?}", threads); let pool = ListenerPool::new(server.listener); let worker = Worker::new(handler, server.timeouts); let work = move |mut stream| worker.handle_connection(&mut stream); let guard = thread::spawn(move || pool.accept(work, threads)); Ok(Listening { _guard: Some(guard), socket: socket, }) } struct Worker<H: Handler +'static> { handler: H, timeouts: Timeouts, } impl<H: Handler +'static> Worker<H> { fn new(handler: H, timeouts: Timeouts) -> Worker<H> { Worker { handler: handler, timeouts: timeouts, } } fn handle_connection<S>(&self, mut stream: &mut S) where S: NetworkStream + Clone { debug!("Incoming stream"); self.handler.on_connection_start(); if let Err(e) = self.set_timeouts(&*stream) { error!("set_timeouts error: {:?}", e); return; } let addr = match stream.peer_addr() { Ok(addr) => addr, Err(e) => { error!("Peer Name error: {:?}", e); return; } }; // FIXME: Use Type ascription let stream_clone: &mut NetworkStream = &mut stream.clone(); let mut rdr = BufReader::new(stream_clone); let mut wrt = BufWriter::new(stream); while self.keep_alive_loop(&mut rdr, &mut wrt, addr) { if let Err(e) = self.set_read_timeout(*rdr.get_ref(), self.timeouts.keep_alive) { error!("set_read_timeout keep_alive {:?}", e); break; } } self.handler.on_connection_end(); debug!("keep_alive loop ending for {}", addr); } fn set_timeouts(&self, s: &NetworkStream) -> io::Result<()> { try!(self.set_read_timeout(s, self.timeouts.read)); self.set_write_timeout(s, self.timeouts.write) } #[cfg(not(feature = "timeouts"))] fn set_write_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_write_timeout(&self, s: &NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_write_timeout(timeout) } #[cfg(not(feature = "timeouts"))] fn set_read_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_read_timeout(&self, s: 
&NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_read_timeout(timeout) } fn keep_alive_loop<W: Write>(&self, mut rdr: &mut BufReader<&mut NetworkStream>, wrt: &mut W, addr: SocketAddr) -> bool { let req = match Request::new(rdr, addr) { Ok(req) => req, Err(Error::Io(ref e)) if e.kind() == ErrorKind::ConnectionAborted => { trace!("tcp closed, cancelling keep-alive loop"); return false; } Err(Error::Io(e)) => { debug!("ioerror in keepalive loop = {:?}", e); return false; } Err(e) => { //TODO: send a 400 response error!("request error = {:?}", e); return false; } }; if!self.handle_expect(&req, wrt) { return false; } if let Err(e) = req.set_read_timeout(self.timeouts.read) { error!("set_read_timeout {:?}", e); return false; } let mut keep_alive = self.timeouts.keep_alive.is_some() && http::should_keep_alive(req.version, &req.headers); let version = req.version; let mut res_headers = Headers::new(); if!keep_alive { res_headers.set(Connection::close()); } { let mut res = Response::new(wrt, &mut res_headers); res.version = version; self.handler.handle(req, res); } // if the request was keep-alive, we need to check that the server agrees // if it wasn't, then the server cannot force it to be true anyways if keep_alive { keep_alive = http::should_keep_alive(version, &res_headers); } debug!("keep_alive = {:?} for {}", keep_alive, addr); keep_alive } fn handle_expect<W: Write>(&self, req: &Request, wrt: &mut W) -> bool { if req.version == Http11 && req.headers.get() == Some(&Expect::Continue) { let status = self.handler.check_continue((&req.method, &req.uri, &req.headers)); match write!(wrt, "{} {}\r\n\r\n", Http11, status) { Ok(..) => (), Err(e) => { error!("error writing 100-continue: {:?}", e); return false; } } if status!= StatusCode::Continue { debug!("non-100 status ({}) for Expect 100 request", status); return false; } } true } } /// A listening server, which can later be closed. pub struct Listening { _guard: Option<JoinHandle<()>>, /// The socket addresses that the server is bound to. pub socket: SocketAddr, } impl fmt::Debug for Listening { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Listening {{ socket: {:?} }}", self.socket) } } impl Drop for Listening { fn drop(&mut self) { let _ = self._guard.take().map(|g| g.join()); } } impl Listening { /// Stop the server from listening to its socket address. pub fn close(&mut self) -> ::Result<()> { let _ = self._guard.take(); debug!("closing server"); Ok(()) } } /// A handler that can handle incoming requests for a server. pub trait Handler: Sync + Send { /// Receives a `Request`/`Response` pair, and should perform some action on them. /// /// This could reading from the request, and writing to the response. fn handle<'a, 'k>(&'a self, Request<'a, 'k>, Response<'a, Fresh>); /// Called when a Request includes a `Expect: 100-continue` header. /// /// By default, this will always immediately response with a `StatusCode::Continue`, /// but can be overridden with custom behavior. 
fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::Continue } /// This is run after a connection is received, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_start(&self) { } /// This is run before a connection is closed, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_end(&self) { } } impl<F> Handler for F where F: Fn(Request, Response<Fresh>), F: Sync + Send { fn handle<'a, 'k>(&'a self, req: Request<'a, 'k>, res: Response<'a, Fresh>) { self(req, res) } } #[cfg(test)] mod tests { use header::Headers; use method::Method; use mock::MockStream; use status::StatusCode; use uri::RequestUri; use super::{Request, Response, Fresh, Handler, Worker}; #[test] fn test_check_continue_default() { let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); fn handle(_: Request, res: Response<Fresh>) { res.start().unwrap().end().unwrap(); } Worker::new(handle, Default::default()).handle_connection(&mut mock); let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; assert_eq!(&mock.write[..cont.len()], cont); let res = b"HTTP/1.1 200 OK\r\n"; assert_eq!(&mock.write[cont.len()..cont.len() + res.len()], res); } #[test] fn test_check_continue_reject() { struct Reject; impl Handler for Reject { fn handle<'a, 'k>(&'a self, _: Request<'a, 'k>, res: Response<'a, Fresh>) { res.start().unwrap().end().unwrap(); } fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::ExpectationFailed } } let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); Worker::new(Reject, Default::default()).handle_connection(&mut mock); assert_eq!(mock.write, &b"HTTP/1.1 417 Expectation Failed\r\n\r\n"[..]); } }
keep_alive
identifier_name
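The identifier_name above resolves to keep_alive, the Server setter that records an optional keep-alive Duration in its Timeouts struct. Here is a stripped-down sketch of that configuration pattern, assuming only std; for brevity it keeps just the keep_alive field and omits the read/write timeouts and the cfg(feature = "timeouts") gating that the original code uses.

use std::time::Duration;

// Simplified counterpart of hyper's Timeouts; only the keep-alive slot is modelled here.
#[derive(Clone, Copy, Default, Debug, PartialEq)]
struct Timeouts {
    keep_alive: Option<Duration>,
}

struct Server {
    timeouts: Timeouts,
}

impl Server {
    fn new() -> Server {
        Server { timeouts: Timeouts::default() }
    }

    // Mirrors the identifier_name filled in above: record the requested keep-alive timeout.
    fn keep_alive(&mut self, timeout: Duration) {
        self.timeouts.keep_alive = Some(timeout);
    }
}

fn main() {
    let mut server = Server::new();
    server.keep_alive(Duration::from_secs(5));
    assert_eq!(server.timeouts.keep_alive, Some(Duration::from_secs(5)));
}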
mod.rs
//! HTTP Server //! //! # Server //! //! A `Server` is created to listen on port, parse HTTP requests, and hand //! them off to a `Handler`. By default, the Server will listen across multiple //! threads, but that can be configured to a single thread if preferred. //! //! # Handling requests //! //! You must pass a `Handler` to the Server that will handle requests. There is //! a default implementation for `fn`s and closures, allowing you pass one of //! those easily. //! //! //! ```no_run //! use hyper::server::{Server, Request, Response}; //! //! fn hello(req: Request, res: Response) { //! // handle things here //! } //! //! Server::http("0.0.0.0:0").unwrap().handle(hello).unwrap(); //! ``` //! //! As with any trait, you can also define a struct and implement `Handler` //! directly on your own type, and pass that to the `Server` instead. //! //! ```no_run //! use std::sync::Mutex; //! use std::sync::mpsc::{channel, Sender}; //! use hyper::server::{Handler, Server, Request, Response}; //! //! struct SenderHandler { //! sender: Mutex<Sender<&'static str>> //! } //! //! impl Handler for SenderHandler { //! fn handle(&self, req: Request, res: Response) { //! self.sender.lock().unwrap().send("start").unwrap(); //! } //! } //! //! //! let (tx, rx) = channel(); //! Server::http("0.0.0.0:0").unwrap().handle(SenderHandler { //! sender: Mutex::new(tx) //! }).unwrap(); //! ``` //! //! Since the `Server` will be listening on multiple threads, the `Handler` //! must implement `Sync`: any mutable state must be synchronized. //! //! ```no_run //! use std::sync::atomic::{AtomicUsize, Ordering}; //! use hyper::server::{Server, Request, Response}; //! //! let counter = AtomicUsize::new(0); //! Server::http("0.0.0.0:0").unwrap().handle(move |req: Request, res: Response| { //! counter.fetch_add(1, Ordering::Relaxed); //! }).unwrap(); //! ``` //! //! # The `Request` and `Response` pair //! //! A `Handler` receives a pair of arguments, a `Request` and a `Response`. The //! `Request` includes access to the `method`, `uri`, and `headers` of the //! incoming HTTP request. It also implements `std::io::Read`, in order to //! read any body, such as with `POST` or `PUT` messages. //! //! Likewise, the `Response` includes ways to set the `status` and `headers`, //! and implements `std::io::Write` to allow writing the response body. //! //! ```no_run //! use std::io; //! use hyper::server::{Server, Request, Response}; //! use hyper::status::StatusCode; //! //! Server::http("0.0.0.0:0").unwrap().handle(|mut req: Request, mut res: Response| { //! match req.method { //! hyper::Post => { //! io::copy(&mut req, &mut res.start().unwrap()).unwrap(); //! }, //! _ => *res.status_mut() = StatusCode::MethodNotAllowed //! } //! }).unwrap(); //! ``` //! //! ## An aside: Write Status //! //! The `Response` uses a phantom type parameter to determine its write status. //! What does that mean? In short, it ensures you never write a body before //! adding all headers, and never add a header after writing some of the body. //! //! This is often done in most implementations by include a boolean property //! on the response, such as `headers_written`, checking that each time the //! body has something to write, so as to make sure the headers are sent once, //! and only once. But this has 2 downsides: //! //! 1. You are typically never notified that your late header is doing nothing. //! 2. There's a runtime cost to checking on every write. //! //! Instead, hyper handles this statically, or at compile-time. A //! 
`Response<Fresh>` includes a `headers_mut()` method, allowing you add more //! headers. It also does not implement `Write`, so you can't accidentally //! write early. Once the "head" of the response is correct, you can "send" it //! out by calling `start` on the `Response<Fresh>`. This will return a new //! `Response<Streaming>` object, that no longer has `headers_mut()`, but does //! implement `Write`. use std::fmt; use std::io::{self, ErrorKind, BufWriter, Write}; use std::net::{SocketAddr, ToSocketAddrs}; use std::thread::{self, JoinHandle}; use std::time::Duration; use num_cpus; pub use self::request::Request; pub use self::response::Response; pub use net::{Fresh, Streaming}; use Error; use buffer::BufReader; use header::{Headers, Expect, Connection}; use http;
use method::Method; use net::{NetworkListener, NetworkStream, HttpListener, HttpsListener, Ssl}; use status::StatusCode; use uri::RequestUri; use version::HttpVersion::Http11; use self::listener::ListenerPool; pub mod request; pub mod response; mod listener; /// A server can listen on a TCP socket. /// /// Once listening, it will create a `Request`/`Response` pair for each /// incoming connection, and hand them to the provided handler. #[derive(Debug)] pub struct Server<L = HttpListener> { listener: L, timeouts: Timeouts, } #[derive(Clone, Copy, Default, Debug)] struct Timeouts { read: Option<Duration>, write: Option<Duration>, keep_alive: Option<Duration>, } macro_rules! try_option( ($e:expr) => {{ match $e { Some(v) => v, None => return None } }} ); impl<L: NetworkListener> Server<L> { /// Creates a new server with the provided handler. #[inline] pub fn new(listener: L) -> Server<L> { Server { listener: listener, timeouts: Timeouts::default(), } } /// Enables keep-alive for this server. /// /// The timeout duration passed will be used to determine how long /// to keep the connection alive before dropping it. /// /// **NOTE**: The timeout will only be used when the `timeouts` feature /// is enabled for hyper, and rustc is 1.4 or greater. #[inline] pub fn keep_alive(&mut self, timeout: Duration) { self.timeouts.keep_alive = Some(timeout); } #[cfg(feature = "timeouts")] pub fn set_read_timeout(&mut self, dur: Option<Duration>) { self.timeouts.read = dur; } #[cfg(feature = "timeouts")] pub fn set_write_timeout(&mut self, dur: Option<Duration>) { self.timeouts.write = dur; } } impl Server<HttpListener> { /// Creates a new server that will handle `HttpStream`s. pub fn http<To: ToSocketAddrs>(addr: To) -> ::Result<Server<HttpListener>> { HttpListener::new(addr).map(Server::new) } } impl<S: Ssl + Clone + Send> Server<HttpsListener<S>> { /// Creates a new server that will handle `HttpStream`s over SSL. /// /// You can use any SSL implementation, as long as implements `hyper::net::Ssl`. pub fn https<A: ToSocketAddrs>(addr: A, ssl: S) -> ::Result<Server<HttpsListener<S>>> { HttpsListener::new(addr, ssl).map(Server::new) } } impl<L: NetworkListener + Send +'static> Server<L> { /// Binds to a socket and starts handling connections. pub fn handle<H: Handler +'static>(self, handler: H) -> ::Result<Listening> { self.handle_threads(handler, num_cpus::get() * 5 / 4) } /// Binds to a socket and starts handling connections with the provided /// number of threads. 
pub fn handle_threads<H: Handler +'static>(self, handler: H, threads: usize) -> ::Result<Listening> { handle(self, handler, threads) } } fn handle<H, L>(mut server: Server<L>, handler: H, threads: usize) -> ::Result<Listening> where H: Handler +'static, L: NetworkListener + Send +'static { let socket = try!(server.listener.local_addr()); debug!("threads = {:?}", threads); let pool = ListenerPool::new(server.listener); let worker = Worker::new(handler, server.timeouts); let work = move |mut stream| worker.handle_connection(&mut stream); let guard = thread::spawn(move || pool.accept(work, threads)); Ok(Listening { _guard: Some(guard), socket: socket, }) } struct Worker<H: Handler +'static> { handler: H, timeouts: Timeouts, } impl<H: Handler +'static> Worker<H> { fn new(handler: H, timeouts: Timeouts) -> Worker<H> { Worker { handler: handler, timeouts: timeouts, } } fn handle_connection<S>(&self, mut stream: &mut S) where S: NetworkStream + Clone { debug!("Incoming stream"); self.handler.on_connection_start(); if let Err(e) = self.set_timeouts(&*stream) { error!("set_timeouts error: {:?}", e); return; } let addr = match stream.peer_addr() { Ok(addr) => addr, Err(e) => { error!("Peer Name error: {:?}", e); return; } }; // FIXME: Use Type ascription let stream_clone: &mut NetworkStream = &mut stream.clone(); let mut rdr = BufReader::new(stream_clone); let mut wrt = BufWriter::new(stream); while self.keep_alive_loop(&mut rdr, &mut wrt, addr) { if let Err(e) = self.set_read_timeout(*rdr.get_ref(), self.timeouts.keep_alive) { error!("set_read_timeout keep_alive {:?}", e); break; } } self.handler.on_connection_end(); debug!("keep_alive loop ending for {}", addr); } fn set_timeouts(&self, s: &NetworkStream) -> io::Result<()> { try!(self.set_read_timeout(s, self.timeouts.read)); self.set_write_timeout(s, self.timeouts.write) } #[cfg(not(feature = "timeouts"))] fn set_write_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_write_timeout(&self, s: &NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_write_timeout(timeout) } #[cfg(not(feature = "timeouts"))] fn set_read_timeout(&self, _s: &NetworkStream, _timeout: Option<Duration>) -> io::Result<()> { Ok(()) } #[cfg(feature = "timeouts")] fn set_read_timeout(&self, s: &NetworkStream, timeout: Option<Duration>) -> io::Result<()> { s.set_read_timeout(timeout) } fn keep_alive_loop<W: Write>(&self, mut rdr: &mut BufReader<&mut NetworkStream>, wrt: &mut W, addr: SocketAddr) -> bool { let req = match Request::new(rdr, addr) { Ok(req) => req, Err(Error::Io(ref e)) if e.kind() == ErrorKind::ConnectionAborted => { trace!("tcp closed, cancelling keep-alive loop"); return false; } Err(Error::Io(e)) => { debug!("ioerror in keepalive loop = {:?}", e); return false; } Err(e) => { //TODO: send a 400 response error!("request error = {:?}", e); return false; } }; if!self.handle_expect(&req, wrt) { return false; } if let Err(e) = req.set_read_timeout(self.timeouts.read) { error!("set_read_timeout {:?}", e); return false; } let mut keep_alive = self.timeouts.keep_alive.is_some() && http::should_keep_alive(req.version, &req.headers); let version = req.version; let mut res_headers = Headers::new(); if!keep_alive { res_headers.set(Connection::close()); } { let mut res = Response::new(wrt, &mut res_headers); res.version = version; self.handler.handle(req, res); } // if the request was keep-alive, we need to check that the server agrees // if it wasn't, then the server cannot force it 
to be true anyway if keep_alive { keep_alive = http::should_keep_alive(version, &res_headers); } debug!("keep_alive = {:?} for {}", keep_alive, addr); keep_alive } fn handle_expect<W: Write>(&self, req: &Request, wrt: &mut W) -> bool { if req.version == Http11 && req.headers.get() == Some(&Expect::Continue) { let status = self.handler.check_continue((&req.method, &req.uri, &req.headers)); match write!(wrt, "{} {}\r\n\r\n", Http11, status) { Ok(..) => (), Err(e) => { error!("error writing 100-continue: {:?}", e); return false; } } if status != StatusCode::Continue { debug!("non-100 status ({}) for Expect 100 request", status); return false; } } true } } /// A listening server, which can later be closed. pub struct Listening { _guard: Option<JoinHandle<()>>, /// The socket address that the server is bound to. pub socket: SocketAddr, } impl fmt::Debug for Listening { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Listening {{ socket: {:?} }}", self.socket) } } impl Drop for Listening { fn drop(&mut self) { let _ = self._guard.take().map(|g| g.join()); } } impl Listening { /// Stop the server from listening to its socket address. pub fn close(&mut self) -> ::Result<()> { let _ = self._guard.take(); debug!("closing server"); Ok(()) } } /// A handler that can handle incoming requests for a server. pub trait Handler: Sync + Send { /// Receives a `Request`/`Response` pair, and should perform some action on them. /// /// This could be reading from the request and writing to the response. fn handle<'a, 'k>(&'a self, Request<'a, 'k>, Response<'a, Fresh>); /// Called when a Request includes an `Expect: 100-continue` header. /// /// By default, this will immediately respond with a `StatusCode::Continue`, /// but can be overridden with custom behavior.
fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::Continue } /// This is run after a connection is received, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_start(&self) { } /// This is run before a connection is closed, on a per-connection basis (not a /// per-request basis, as a connection with keep-alive may handle multiple /// requests) fn on_connection_end(&self) { } } impl<F> Handler for F where F: Fn(Request, Response<Fresh>), F: Sync + Send { fn handle<'a, 'k>(&'a self, req: Request<'a, 'k>, res: Response<'a, Fresh>) { self(req, res) } } #[cfg(test)] mod tests { use header::Headers; use method::Method; use mock::MockStream; use status::StatusCode; use uri::RequestUri; use super::{Request, Response, Fresh, Handler, Worker}; #[test] fn test_check_continue_default() { let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); fn handle(_: Request, res: Response<Fresh>) { res.start().unwrap().end().unwrap(); } Worker::new(handle, Default::default()).handle_connection(&mut mock); let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; assert_eq!(&mock.write[..cont.len()], cont); let res = b"HTTP/1.1 200 OK\r\n"; assert_eq!(&mock.write[cont.len()..cont.len() + res.len()], res); } #[test] fn test_check_continue_reject() { struct Reject; impl Handler for Reject { fn handle<'a, 'k>(&'a self, _: Request<'a, 'k>, res: Response<'a, Fresh>) { res.start().unwrap().end().unwrap(); } fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { StatusCode::ExpectationFailed } } let mut mock = MockStream::with_input(b"\ POST /upload HTTP/1.1\r\n\ Host: example.domain\r\n\ Expect: 100-continue\r\n\ Content-Length: 10\r\n\ \r\n\ 1234567890\ "); Worker::new(Reject, Default::default()).handle_connection(&mut mock); assert_eq!(mock.write, &b"HTTP/1.1 417 Expectation Failed\r\n\r\n"[..]); } }
random_line_split
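The `Fresh`/`Streaming` write-status idea described in the `server.rs` doc comment above can be modelled in isolation. The sketch below is a minimal stand-in, not hyper's actual types: the `Response`, `Fresh`, and `Streaming` names are reused only to mirror the description.

```rust
use std::io::{self, Write};
use std::marker::PhantomData;

// Marker types standing in for hyper's `Fresh` and `Streaming`.
struct Fresh;
struct Streaming;

struct Response<W: Write, S = Fresh> {
    headers: Vec<(String, String)>,
    body: W,
    _state: PhantomData<S>,
}

impl<W: Write> Response<W, Fresh> {
    fn new(body: W) -> Self {
        Response { headers: Vec::new(), body, _state: PhantomData }
    }

    // Headers can only be added while the response is still `Fresh`.
    fn add_header(&mut self, name: &str, value: &str) {
        self.headers.push((name.to_owned(), value.to_owned()));
    }

    // Writing the head consumes the `Fresh` response and returns a
    // `Streaming` one, so a late `add_header` call becomes a compile error.
    fn start(mut self) -> io::Result<Response<W, Streaming>> {
        for (name, value) in &self.headers {
            writeln!(self.body, "{}: {}", name, value)?;
        }
        writeln!(self.body)?;
        Ok(Response { headers: self.headers, body: self.body, _state: PhantomData })
    }
}

// Only the `Streaming` state can write body bytes.
impl<W: Write> Write for Response<W, Streaming> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.body.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.body.flush()
    }
}

fn main() -> io::Result<()> {
    let mut res = Response::new(Vec::<u8>::new());
    res.add_header("Content-Type", "text/plain");
    let mut streaming = res.start()?;
    streaming.write_all(b"hello")?;
    // `streaming.add_header(...)` would not compile: that method only
    // exists on `Response<_, Fresh>`.
    Ok(())
}
```

The point of the pattern is that a misuse such as adding a header after `start()` fails to compile, rather than being silently ignored or checked on every write at runtime, which is the trade-off the doc comment describes.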
paths.rs
//! Path manipulation utilities use std::env; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::fs::OpenOptions; use std::io::prelude::*; use std::path::{Path, PathBuf, Component}; use util::{human, internal, CraftResult, ChainError}; pub fn join_paths<T: AsRef<OsStr>>(paths: &[T], env: &str) -> CraftResult<OsString> { env::join_paths(paths.iter()).or_else(|e| { let paths = paths.iter().map(Path::new).collect::<Vec<_>>(); internal(format!("failed to join path array: {:?}", paths)).chain_error(|| { human(format!("failed to join search paths together: {}\n\ Does ${} have an unterminated quote character?", e, env)) }) }) } pub fn dylib_path_envvar() -> &'static str { if cfg!(windows) { "PATH" } else if cfg!(target_os = "macos") { "DYLD_LIBRARY_PATH" } else { "LD_LIBRARY_PATH" } } pub fn dylib_path() -> Vec<PathBuf> { match env::var_os(dylib_path_envvar()) { Some(var) => env::split_paths(&var).collect(), None => Vec::new(), } } pub fn normalize_path(path: &Path) -> PathBuf { let mut components = path.components().peekable(); let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek() .cloned() { components.next(); PathBuf::from(c.as_os_str()) } else { PathBuf::new() }; for component in components { match component { Component::Prefix(..) => unreachable!(), Component::RootDir => { ret.push(component.as_os_str()); } Component::CurDir =>
Component::ParentDir => { ret.pop(); } Component::Normal(c) => { ret.push(c); } } } ret } pub fn without_prefix<'a>(a: &'a Path, b: &'a Path) -> Option<&'a Path> { let mut a = a.components(); let mut b = b.components(); loop { match b.next() { Some(y) => { match a.next() { Some(x) if x == y => continue, _ => return None, } } None => return Some(a.as_path()), } } } pub fn read(path: &Path) -> CraftResult<String> { (|| -> CraftResult<_> { let mut ret = String::new(); let mut f = File::open(path)?; f.read_to_string(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) } pub fn read_bytes(path: &Path) -> CraftResult<Vec<u8>> { (|| -> CraftResult<_> { let mut ret = Vec::new(); let mut f = File::open(path)?; f.read_to_end(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) } pub fn write(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = File::create(path)?; f.write_all(contents)?; Ok(()) })() .map_err(human) .chain_error(|| human(format!("failed to write `{}`", path.display()))) } pub fn append(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = OpenOptions::new().write(true) .append(true) .create(true) .open(path)?; f.write_all(contents)?; Ok(()) }) .chain_error(|| internal(format!("failed to write `{}`", path.display()))) } #[cfg(unix)] pub fn path2bytes(path: &Path) -> CraftResult<&[u8]> { use std::os::unix::prelude::*; Ok(path.as_os_str().as_bytes()) } #[cfg(windows)] pub fn path2bytes(path: &Path) -> CraftResult<&[u8]> { match path.as_os_str().to_str() { Some(s) => Ok(s.as_bytes()), None => Err(human(format!("invalid non-unicode path: {}", path.display()))), } } #[cfg(unix)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::os::unix::prelude::*; use std::ffi::OsStr; Ok(PathBuf::from(OsStr::from_bytes(bytes))) } #[cfg(windows)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::str; match str::from_utf8(bytes) { Ok(s) => Ok(PathBuf::from(s)), Err(..) => Err(human("invalid non-unicode path")), } }
{}
conditional_block
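`normalize_path` in the row above resolves `.` and `..` purely lexically, without consulting the filesystem. Here is a compact standalone sketch of the same idea (not the crate's exact code) together with a couple of sanity checks:

```rust
use std::path::{Component, Path, PathBuf};

// Lexically normalize a path: drop `.`, pop a component for `..`,
// and keep any prefix or root untouched, as in the module above.
fn normalize(path: &Path) -> PathBuf {
    let mut out = PathBuf::new();
    for component in path.components() {
        match component {
            Component::Prefix(..) | Component::RootDir => out.push(component.as_os_str()),
            Component::CurDir => {}
            Component::ParentDir => {
                out.pop();
            }
            Component::Normal(c) => out.push(c),
        }
    }
    out
}

fn main() {
    assert_eq!(normalize(Path::new("/a/./b/../c")), PathBuf::from("/a/c"));
    assert_eq!(normalize(Path::new("x/y/../../z")), PathBuf::from("z"));
    println!("normalized: {}", normalize(Path::new("/a/./b/../c")).display());
}
```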
paths.rs
//! Path manipulation utilities use std::env; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::fs::OpenOptions; use std::io::prelude::*; use std::path::{Path, PathBuf, Component}; use util::{human, internal, CraftResult, ChainError}; pub fn join_paths<T: AsRef<OsStr>>(paths: &[T], env: &str) -> CraftResult<OsString> { env::join_paths(paths.iter()).or_else(|e| { let paths = paths.iter().map(Path::new).collect::<Vec<_>>(); internal(format!("failed to join path array: {:?}", paths)).chain_error(|| { human(format!("failed to join search paths together: {}\n\ Does ${} have an unterminated quote character?", e, env)) }) }) } pub fn dylib_path_envvar() -> &'static str { if cfg!(windows) { "PATH" } else if cfg!(target_os = "macos") { "DYLD_LIBRARY_PATH" } else { "LD_LIBRARY_PATH" } } pub fn dylib_path() -> Vec<PathBuf> { match env::var_os(dylib_path_envvar()) { Some(var) => env::split_paths(&var).collect(), None => Vec::new(),
} } pub fn normalize_path(path: &Path) -> PathBuf { let mut components = path.components().peekable(); let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek() .cloned() { components.next(); PathBuf::from(c.as_os_str()) } else { PathBuf::new() }; for component in components { match component { Component::Prefix(..) => unreachable!(), Component::RootDir => { ret.push(component.as_os_str()); } Component::CurDir => {} Component::ParentDir => { ret.pop(); } Component::Normal(c) => { ret.push(c); } } } ret } pub fn without_prefix<'a>(a: &'a Path, b: &'a Path) -> Option<&'a Path> { let mut a = a.components(); let mut b = b.components(); loop { match b.next() { Some(y) => { match a.next() { Some(x) if x == y => continue, _ => return None, } } None => return Some(a.as_path()), } } } pub fn read(path: &Path) -> CraftResult<String> { (|| -> CraftResult<_> { let mut ret = String::new(); let mut f = File::open(path)?; f.read_to_string(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) } pub fn read_bytes(path: &Path) -> CraftResult<Vec<u8>> { (|| -> CraftResult<_> { let mut ret = Vec::new(); let mut f = File::open(path)?; f.read_to_end(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) } pub fn write(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = File::create(path)?; f.write_all(contents)?; Ok(()) })() .map_err(human) .chain_error(|| human(format!("failed to write `{}`", path.display()))) } pub fn append(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = OpenOptions::new().write(true) .append(true) .create(true) .open(path)?; f.write_all(contents)?; Ok(()) }) .chain_error(|| internal(format!("failed to write `{}`", path.display()))) } #[cfg(unix)] pub fn path2bytes(path: &Path) -> CraftResult<&[u8]> { use std::os::unix::prelude::*; Ok(path.as_os_str().as_bytes()) } #[cfg(windows)] pub fn path2bytes(path: &Path) -> CraftResult<&[u8]> { match path.as_os_str().to_str() { Some(s) => Ok(s.as_bytes()), None => Err(human(format!("invalid non-unicode path: {}", path.display()))), } } #[cfg(unix)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::os::unix::prelude::*; use std::ffi::OsStr; Ok(PathBuf::from(OsStr::from_bytes(bytes))) } #[cfg(windows)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::str; match str::from_utf8(bytes) { Ok(s) => Ok(PathBuf::from(s)), Err(..) => Err(human("invalid non-unicode path")), } }
random_line_split
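`dylib_path_envvar` and `dylib_path` above are thin wrappers over the platform's dynamic-library search variable and `std::env`'s path splitting. A standalone sketch of that composition, assuming nothing beyond the standard library:

```rust
use std::env;
use std::path::PathBuf;

// Pick the dynamic-library search variable for the current platform,
// matching the cfg! chain in the module above.
fn dylib_path_envvar() -> &'static str {
    if cfg!(windows) {
        "PATH"
    } else if cfg!(target_os = "macos") {
        "DYLD_LIBRARY_PATH"
    } else {
        "LD_LIBRARY_PATH"
    }
}

fn main() {
    let var = dylib_path_envvar();
    // Split the variable the same way `dylib_path` does; an unset
    // variable simply yields an empty list.
    let entries: Vec<PathBuf> = env::var_os(var)
        .map(|v| env::split_paths(&v).collect())
        .unwrap_or_default();
    println!("{} has {} entries", var, entries.len());
    // Rebuilding the variable round-trips through `env::join_paths`.
    if let Ok(joined) = env::join_paths(&entries) {
        println!("joined: {:?}", joined);
    }
}
```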
paths.rs
//! Path manipulation utilities use std::env; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::fs::OpenOptions; use std::io::prelude::*; use std::path::{Path, PathBuf, Component}; use util::{human, internal, CraftResult, ChainError}; pub fn join_paths<T: AsRef<OsStr>>(paths: &[T], env: &str) -> CraftResult<OsString> { env::join_paths(paths.iter()).or_else(|e| { let paths = paths.iter().map(Path::new).collect::<Vec<_>>(); internal(format!("failed to join path array: {:?}", paths)).chain_error(|| { human(format!("failed to join search paths together: {}\n\ Does ${} have an unterminated quote character?", e, env)) }) }) } pub fn dylib_path_envvar() -> &'static str { if cfg!(windows) { "PATH" } else if cfg!(target_os = "macos") { "DYLD_LIBRARY_PATH" } else { "LD_LIBRARY_PATH" } } pub fn dylib_path() -> Vec<PathBuf> { match env::var_os(dylib_path_envvar()) { Some(var) => env::split_paths(&var).collect(), None => Vec::new(), } } pub fn normalize_path(path: &Path) -> PathBuf { let mut components = path.components().peekable(); let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek() .cloned() { components.next(); PathBuf::from(c.as_os_str()) } else { PathBuf::new() }; for component in components { match component { Component::Prefix(..) => unreachable!(), Component::RootDir => { ret.push(component.as_os_str()); } Component::CurDir => {} Component::ParentDir => { ret.pop(); } Component::Normal(c) => { ret.push(c); } } } ret } pub fn without_prefix<'a>(a: &'a Path, b: &'a Path) -> Option<&'a Path> { let mut a = a.components(); let mut b = b.components(); loop { match b.next() { Some(y) => { match a.next() { Some(x) if x == y => continue, _ => return None, } } None => return Some(a.as_path()), } } } pub fn read(path: &Path) -> CraftResult<String> { (|| -> CraftResult<_> { let mut ret = String::new(); let mut f = File::open(path)?; f.read_to_string(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) } pub fn read_bytes(path: &Path) -> CraftResult<Vec<u8>> { (|| -> CraftResult<_> { let mut ret = Vec::new(); let mut f = File::open(path)?; f.read_to_end(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) } pub fn write(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = File::create(path)?; f.write_all(contents)?; Ok(()) })() .map_err(human) .chain_error(|| human(format!("failed to write `{}`", path.display()))) } pub fn append(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = OpenOptions::new().write(true) .append(true) .create(true) .open(path)?; f.write_all(contents)?; Ok(()) }) .chain_error(|| internal(format!("failed to write `{}`", path.display()))) } #[cfg(unix)] pub fn path2bytes(path: &Path) -> CraftResult<&[u8]> { use std::os::unix::prelude::*; Ok(path.as_os_str().as_bytes()) } #[cfg(windows)] pub fn
(path: &Path) -> CraftResult<&[u8]> { match path.as_os_str().to_str() { Some(s) => Ok(s.as_bytes()), None => Err(human(format!("invalid non-unicode path: {}", path.display()))), } } #[cfg(unix)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::os::unix::prelude::*; use std::ffi::OsStr; Ok(PathBuf::from(OsStr::from_bytes(bytes))) } #[cfg(windows)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::str; match str::from_utf8(bytes) { Ok(s) => Ok(PathBuf::from(s)), Err(..) => Err(human("invalid non-unicode path")), } }
path2bytes
identifier_name
paths.rs
//! Path manipulation utilities use std::env; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::fs::OpenOptions; use std::io::prelude::*; use std::path::{Path, PathBuf, Component}; use util::{human, internal, CraftResult, ChainError}; pub fn join_paths<T: AsRef<OsStr>>(paths: &[T], env: &str) -> CraftResult<OsString> { env::join_paths(paths.iter()).or_else(|e| { let paths = paths.iter().map(Path::new).collect::<Vec<_>>(); internal(format!("failed to join path array: {:?}", paths)).chain_error(|| { human(format!("failed to join search paths together: {}\n\ Does ${} have an unterminated quote character?", e, env)) }) }) } pub fn dylib_path_envvar() -> &'static str { if cfg!(windows) { "PATH" } else if cfg!(target_os = "macos") { "DYLD_LIBRARY_PATH" } else { "LD_LIBRARY_PATH" } } pub fn dylib_path() -> Vec<PathBuf> { match env::var_os(dylib_path_envvar()) { Some(var) => env::split_paths(&var).collect(), None => Vec::new(), } } pub fn normalize_path(path: &Path) -> PathBuf { let mut components = path.components().peekable(); let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek() .cloned() { components.next(); PathBuf::from(c.as_os_str()) } else { PathBuf::new() }; for component in components { match component { Component::Prefix(..) => unreachable!(), Component::RootDir => { ret.push(component.as_os_str()); } Component::CurDir => {} Component::ParentDir => { ret.pop(); } Component::Normal(c) => { ret.push(c); } } } ret } pub fn without_prefix<'a>(a: &'a Path, b: &'a Path) -> Option<&'a Path> { let mut a = a.components(); let mut b = b.components(); loop { match b.next() { Some(y) => { match a.next() { Some(x) if x == y => continue, _ => return None, } } None => return Some(a.as_path()), } } } pub fn read(path: &Path) -> CraftResult<String>
pub fn read_bytes(path: &Path) -> CraftResult<Vec<u8>> { (|| -> CraftResult<_> { let mut ret = Vec::new(); let mut f = File::open(path)?; f.read_to_end(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) } pub fn write(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = File::create(path)?; f.write_all(contents)?; Ok(()) })() .map_err(human) .chain_error(|| human(format!("failed to write `{}`", path.display()))) } pub fn append(path: &Path, contents: &[u8]) -> CraftResult<()> { (|| -> CraftResult<()> { let mut f = OpenOptions::new().write(true) .append(true) .create(true) .open(path)?; f.write_all(contents)?; Ok(()) }) .chain_error(|| internal(format!("failed to write `{}`", path.display()))) } #[cfg(unix)] pub fn path2bytes(path: &Path) -> CraftResult<&[u8]> { use std::os::unix::prelude::*; Ok(path.as_os_str().as_bytes()) } #[cfg(windows)] pub fn path2bytes(path: &Path) -> CraftResult<&[u8]> { match path.as_os_str().to_str() { Some(s) => Ok(s.as_bytes()), None => Err(human(format!("invalid non-unicode path: {}", path.display()))), } } #[cfg(unix)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::os::unix::prelude::*; use std::ffi::OsStr; Ok(PathBuf::from(OsStr::from_bytes(bytes))) } #[cfg(windows)] pub fn bytes2path(bytes: &[u8]) -> CraftResult<PathBuf> { use std::str; match str::from_utf8(bytes) { Ok(s) => Ok(PathBuf::from(s)), Err(..) => Err(human("invalid non-unicode path")), } }
{ (|| -> CraftResult<_> { let mut ret = String::new(); let mut f = File::open(path)?; f.read_to_string(&mut ret)?; Ok(ret) })() .map_err(human) .chain_error(|| human(format!("failed to read `{}`", path.display()))) }
identifier_body
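The `path2bytes`/`bytes2path` pair above is the platform-sensitive part of the module: Unix paths are arbitrary bytes, while Windows paths must be valid Unicode to survive a byte round-trip. Below is a standalone variant of the same pair; it returns `std::io::Error` because `CraftResult` and `human` are crate-local helpers.

```rust
use std::path::{Path, PathBuf};

#[cfg(unix)]
fn path2bytes(path: &Path) -> std::io::Result<&[u8]> {
    use std::os::unix::ffi::OsStrExt;
    Ok(path.as_os_str().as_bytes())
}

#[cfg(unix)]
fn bytes2path(bytes: &[u8]) -> std::io::Result<PathBuf> {
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    Ok(PathBuf::from(OsStr::from_bytes(bytes)))
}

// On non-Unix targets the conversion can fail for non-Unicode paths,
// mirroring the Windows branch above.
#[cfg(not(unix))]
fn path2bytes(path: &Path) -> std::io::Result<&[u8]> {
    path.to_str()
        .map(str::as_bytes)
        .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidData, "non-unicode path"))
}

#[cfg(not(unix))]
fn bytes2path(bytes: &[u8]) -> std::io::Result<PathBuf> {
    std::str::from_utf8(bytes)
        .map(PathBuf::from)
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "non-unicode path"))
}

fn main() -> std::io::Result<()> {
    let original = Path::new("/tmp/example.txt");
    let bytes = path2bytes(original)?;
    // The conversion is lossless whenever it succeeds.
    assert_eq!(bytes2path(bytes)?, original);
    Ok(())
}
```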
network.rs
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use std::io; use std::io::Write; use std::net::SocketAddr; use std::ops::Deref; use std::thread; use std::time; use std::time::{SystemTime, UNIX_EPOCH}; use rand::{thread_rng, Rng}; use deps::bitcoin::network::address as btc_network_address; use deps::bitcoin::network::constants as btc_constants; use deps::bitcoin::network::encodable::{ConsensusDecodable, ConsensusEncodable}; use deps::bitcoin::network::message as btc_message; use deps::bitcoin::network::message_blockdata as btc_message_blockdata; use deps::bitcoin::network::message_network as btc_message_network; use deps::bitcoin::network::serialize as btc_serialize; use deps::bitcoin::network::serialize::{RawDecoder, RawEncoder}; use deps::bitcoin::util::hash::Sha256dHash; use burnchains::bitcoin::indexer::{network_id_to_bytes, BitcoinIndexer}; use burnchains::bitcoin::messages::BitcoinMessageHandler; use burnchains::bitcoin::Error as btc_error; use burnchains::bitcoin::PeerMessage; use burnchains::indexer::BurnchainIndexer; use util::get_epoch_time_secs; use util::log; // Based on Andrew Poelstra's rust-bitcoin library. impl BitcoinIndexer { /// Send a Bitcoin protocol message on the wire pub fn send_message(&mut self, payload: btc_message::NetworkMessage) -> Result<(), btc_error> { let message = btc_message::RawNetworkMessage { magic: network_id_to_bytes(self.runtime.network_id), payload: payload, }; self.with_socket(|ref mut sock| { message .consensus_encode(&mut RawEncoder::new(&mut *sock)) .map_err(btc_error::SerializationError)?; sock.flush().map_err(btc_error::Io) }) } /// Receive a Bitcoin protocol message on the wire /// If this method returns Err(ConnectionBroken), then the caller should attempt to re-connect. 
pub fn recv_message(&mut self) -> Result<PeerMessage, btc_error> { let magic = network_id_to_bytes(self.runtime.network_id); self.with_socket(|ref mut sock| { // read the message off the wire let mut decoder = RawDecoder::new(sock); let decoded: btc_message::RawNetworkMessage = ConsensusDecodable::consensus_decode(&mut decoder).map_err(|e| { // if we can't finish a recv(), then report that the connection is broken match e { btc_serialize::Error::Io(ref io_error) => { if io_error.kind() == io::ErrorKind::UnexpectedEof { btc_error::ConnectionBroken } else { btc_error::Io(io::Error::new( io_error.kind(), "I/O error when processing message", )) } } _ => btc_error::SerializationError(e), } })?; // sanity check -- must match our network if decoded.magic!= magic { return Err(btc_error::InvalidMagic); } Ok(decoded.payload) }) } /// Get sender address from our socket pub fn get_local_sockaddr(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.local_addr().map_err(btc_error::Io)) } /// Get receiver address from our socket pub fn get_remote_sockaddr(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.peer_addr().map_err(btc_error::Io)) } /// Handle and consume message we received, if we can. /// Returns UnhandledMessage if we can't handle the given message. pub fn handle_message<T: BitcoinMessageHandler>( &mut self, message: PeerMessage, handler: Option<&mut T>, ) -> Result<bool, btc_error> { if self.runtime.last_getdata_send_time > 0 && self.runtime.last_getdata_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for block data. Killing connection."); return Err(btc_error::TimedOut); } if self.runtime.last_getheaders_send_time > 0 && self.runtime.last_getheaders_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for headers data. Killing connection."); return Err(btc_error::TimedOut); } // classify the message here, so we can pass it along to the handler explicitly match message { btc_message::NetworkMessage::Version(..) => { return self.handle_version(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Verack => { return self.handle_verack(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Ping(..) => { return self.handle_ping(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Pong(..) => { return self.handle_pong(message).and_then(|_r| Ok(true)); } _ => match handler { Some(custom_handler) => custom_handler.handle_message(self, message.clone()), None => Err(btc_error::UnhandledMessage(message.clone())), }, } } /// Do the initial handshake to the remote peer. /// Returns the remote peer's block height pub fn peer_handshake(&mut self) -> Result<u64, btc_error> { debug!( "Begin peer handshake to {}:{}", self.config.peer_host, self.config.peer_port ); self.send_version()?; let version_reply = self.recv_message()?; self.handle_version(version_reply)?; let verack_reply = self.recv_message()?; self.handle_verack(verack_reply)?; debug!( "Established connection to {}:{}, who has {} blocks", self.config.peer_host, self.config.peer_port, self.runtime.block_height ); Ok(self.runtime.block_height) } /// Connect to a remote peer, do a handshake with the remote peer, and use exponential backoff until we /// succeed in establishing a connection. /// This method masks ConnectionBroken errors, but does not mask other network errors. 
/// Returns the remote peer's block height on success pub fn connect_handshake_backoff(&mut self) -> Result<u64, btc_error> { let mut backoff: f64 = 1.0; let mut rng = thread_rng(); loop { let connection_result = self.connect(); match connection_result { Ok(()) => { // connected! now do the handshake let handshake_result = self.peer_handshake(); match handshake_result { Ok(block_height) => { // connected! return Ok(block_height); } Err(btc_error::ConnectionBroken) => { // need to try again backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } Err(e) => { // propagate other network error warn!( "Failed to handshake with {}:{}: {:?}", &self.config.peer_host, self.config.peer_port, &e ); return Err(e); } } } Err(err_msg) => { error!( "Failed to connect to peer {}:{}: {}", &self.config.peer_host, self.config.peer_port, err_msg ); backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } } // don't sleep more than 60 seconds if backoff > 60.0 { backoff = 60.0; } if backoff > 10.0 { warn!("Connection broken; retrying in {} sec...", backoff); } let duration = time::Duration::from_millis((backoff * 1_000.0) as u64); thread::sleep(duration); } } /// Send a Version message pub fn send_version(&mut self) -> Result<(), btc_error> { let timestamp = match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(dur) => dur, Err(err) => err.duration(), } .as_secs() as i64; let local_addr = self.get_local_sockaddr()?; let remote_addr = self.get_remote_sockaddr()?; let sender_address = btc_network_address::Address::new(&local_addr, 0); let remote_address = btc_network_address::Address::new(&remote_addr, 0); let payload = btc_message_network::VersionMessage { version: btc_constants::PROTOCOL_VERSION, services: 0, timestamp: timestamp, receiver: remote_address, sender: sender_address, nonce: self.runtime.version_nonce, user_agent: self.runtime.user_agent.to_owned(), start_height: 0, relay: false, }; debug!( "Send version (nonce={}) to {}:{}", self.runtime.version_nonce, self.config.peer_host, self.config.peer_port ); self.send_message(btc_message::NetworkMessage::Version(payload)) } /// Receive a Version message and reply with a Verack pub fn handle_version(&mut self, version_message: PeerMessage) -> Result<(), btc_error> { match version_message { btc_message::NetworkMessage::Version(msg_body) => { debug!( "Handle version -- remote peer blockchain height is {}", msg_body.start_height ); self.runtime.block_height = msg_body.start_height as u64; return self.send_verack(); } _ => { error!("Did not receive version, but got {:?}", version_message); } }; return Err(btc_error::InvalidMessage(version_message)); } /// Send a verack pub fn send_verack(&mut self) -> Result<(), btc_error> { let payload = btc_message::NetworkMessage::Verack; debug!("Send verack"); self.send_message(payload) } /// Handle a verack we received. /// Does nothing. 
pub fn handle_verack(&mut self, verack_message: PeerMessage) -> Result<(), btc_error> { match verack_message { btc_message::NetworkMessage::Verack => { debug!("Handle verack"); return Ok(()); } _ => { error!("Did not receive verack, but got {:?}", verack_message); } }; Err(btc_error::InvalidMessage(verack_message)) } /// Respond to a Ping message by sending a Pong message pub fn handle_ping(&mut self, ping_message: PeerMessage) -> Result<(), btc_error> { match ping_message { btc_message::NetworkMessage::Ping(ref n) => { debug!("Handle ping {}", n); let payload = btc_message::NetworkMessage::Pong(*n); debug!("Send pong {}", n); return self.send_message(payload); } _ => { error!("Did not receive ping, but got {:?}", ping_message); } }; Err(btc_error::InvalidMessage(ping_message)) } /// Respond to a Pong message. /// Does nothing. pub fn handle_pong(&mut self, pong_message: PeerMessage) -> Result<(), btc_error> { match pong_message { btc_message::NetworkMessage::Pong(n) => { debug!("Handle pong {}", n); return Ok(()); } _ => { error!("Did not receive pong, but got {:?}", pong_message); } }; Err(btc_error::InvalidReply) } /// Send a GetHeaders message /// Note that this isn't a generic GetHeaders message -- you should use this only to ask /// for a batch of 2,000 block hashes after this given hash. pub fn send_getheaders(&mut self, prev_block_hash: Sha256dHash) -> Result<(), btc_error> { let getheaders = btc_message_blockdata::GetHeadersMessage::new(vec![prev_block_hash], prev_block_hash); let payload = btc_message::NetworkMessage::GetHeaders(getheaders); debug!( "Send GetHeaders {} for 2000 headers to {}:{}", prev_block_hash.be_hex_string(), self.config.peer_host, self.config.peer_port ); self.runtime.last_getheaders_send_time = get_epoch_time_secs(); self.send_message(payload) } /// Send a GetData message pub fn send_getdata(&mut self, block_hashes: &Vec<Sha256dHash>) -> Result<(), btc_error>
self.send_message(getdata) } }
{ assert!(block_hashes.len() > 0); let getdata_invs = block_hashes .iter() .map(|h| btc_message_blockdata::Inventory { inv_type: btc_message_blockdata::InvType::Block, hash: h.clone(), }) .collect(); let getdata = btc_message::NetworkMessage::GetData(getdata_invs); self.runtime.last_getdata_send_time = get_epoch_time_secs(); debug!( "Send GetData {}-{} to {}:{}", block_hashes[0].be_hex_string(), block_hashes[block_hashes.len() - 1].be_hex_string(), self.config.peer_host, self.config.peer_port );
identifier_body
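`connect_handshake_backoff` above grows its retry delay as `2·backoff + backoff·jitter` and clamps it at 60 seconds. A small standalone sketch of that schedule, taking the jitter as a parameter instead of drawing it from `rand`:

```rust
use std::time::Duration;

// One step of the backoff schedule used above: roughly doubles the
// delay, adds proportional jitter in [0, 1), and never exceeds 60s.
fn next_backoff(current: f64, jitter: f64) -> f64 {
    let next = 2.0 * current + current * jitter;
    if next > 60.0 {
        60.0
    } else {
        next
    }
}

fn main() {
    let mut backoff = 1.0_f64;
    for attempt in 1..=6 {
        backoff = next_backoff(backoff, 0.5);
        let sleep_for = Duration::from_millis((backoff * 1_000.0) as u64);
        println!("attempt {}: sleeping {:?}", attempt, sleep_for);
    }
    // After a handful of failures the delay saturates at 60 seconds.
    assert!(backoff <= 60.0);
}
```

The jitter term keeps many reconnecting clients from hammering the peer in lockstep, while the cap bounds how long a recovery can be delayed once the peer comes back.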
network.rs
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use std::io; use std::io::Write; use std::net::SocketAddr; use std::ops::Deref; use std::thread; use std::time; use std::time::{SystemTime, UNIX_EPOCH}; use rand::{thread_rng, Rng}; use deps::bitcoin::network::address as btc_network_address; use deps::bitcoin::network::constants as btc_constants; use deps::bitcoin::network::encodable::{ConsensusDecodable, ConsensusEncodable}; use deps::bitcoin::network::message as btc_message; use deps::bitcoin::network::message_blockdata as btc_message_blockdata; use deps::bitcoin::network::message_network as btc_message_network; use deps::bitcoin::network::serialize as btc_serialize; use deps::bitcoin::network::serialize::{RawDecoder, RawEncoder}; use deps::bitcoin::util::hash::Sha256dHash; use burnchains::bitcoin::indexer::{network_id_to_bytes, BitcoinIndexer}; use burnchains::bitcoin::messages::BitcoinMessageHandler; use burnchains::bitcoin::Error as btc_error; use burnchains::bitcoin::PeerMessage; use burnchains::indexer::BurnchainIndexer; use util::get_epoch_time_secs; use util::log; // Based on Andrew Poelstra's rust-bitcoin library. impl BitcoinIndexer { /// Send a Bitcoin protocol message on the wire pub fn send_message(&mut self, payload: btc_message::NetworkMessage) -> Result<(), btc_error> { let message = btc_message::RawNetworkMessage { magic: network_id_to_bytes(self.runtime.network_id), payload: payload, }; self.with_socket(|ref mut sock| { message .consensus_encode(&mut RawEncoder::new(&mut *sock)) .map_err(btc_error::SerializationError)?; sock.flush().map_err(btc_error::Io) }) } /// Receive a Bitcoin protocol message on the wire /// If this method returns Err(ConnectionBroken), then the caller should attempt to re-connect. 
pub fn recv_message(&mut self) -> Result<PeerMessage, btc_error> { let magic = network_id_to_bytes(self.runtime.network_id); self.with_socket(|ref mut sock| { // read the message off the wire let mut decoder = RawDecoder::new(sock); let decoded: btc_message::RawNetworkMessage = ConsensusDecodable::consensus_decode(&mut decoder).map_err(|e| { // if we can't finish a recv(), then report that the connection is broken match e { btc_serialize::Error::Io(ref io_error) => { if io_error.kind() == io::ErrorKind::UnexpectedEof { btc_error::ConnectionBroken } else { btc_error::Io(io::Error::new( io_error.kind(), "I/O error when processing message", )) } } _ => btc_error::SerializationError(e), } })?; // sanity check -- must match our network if decoded.magic!= magic { return Err(btc_error::InvalidMagic); } Ok(decoded.payload) }) } /// Get sender address from our socket pub fn get_local_sockaddr(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.local_addr().map_err(btc_error::Io)) } /// Get receiver address from our socket pub fn get_remote_sockaddr(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.peer_addr().map_err(btc_error::Io)) } /// Handle and consume message we received, if we can. /// Returns UnhandledMessage if we can't handle the given message. pub fn handle_message<T: BitcoinMessageHandler>( &mut self, message: PeerMessage, handler: Option<&mut T>, ) -> Result<bool, btc_error> { if self.runtime.last_getdata_send_time > 0 && self.runtime.last_getdata_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for block data. Killing connection."); return Err(btc_error::TimedOut); } if self.runtime.last_getheaders_send_time > 0 && self.runtime.last_getheaders_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for headers data. Killing connection."); return Err(btc_error::TimedOut); } // classify the message here, so we can pass it along to the handler explicitly match message { btc_message::NetworkMessage::Version(..) =>
btc_message::NetworkMessage::Verack => { return self.handle_verack(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Ping(..) => { return self.handle_ping(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Pong(..) => { return self.handle_pong(message).and_then(|_r| Ok(true)); } _ => match handler { Some(custom_handler) => custom_handler.handle_message(self, message.clone()), None => Err(btc_error::UnhandledMessage(message.clone())), }, } } /// Do the initial handshake to the remote peer. /// Returns the remote peer's block height pub fn peer_handshake(&mut self) -> Result<u64, btc_error> { debug!( "Begin peer handshake to {}:{}", self.config.peer_host, self.config.peer_port ); self.send_version()?; let version_reply = self.recv_message()?; self.handle_version(version_reply)?; let verack_reply = self.recv_message()?; self.handle_verack(verack_reply)?; debug!( "Established connection to {}:{}, who has {} blocks", self.config.peer_host, self.config.peer_port, self.runtime.block_height ); Ok(self.runtime.block_height) } /// Connect to a remote peer, do a handshake with the remote peer, and use exponential backoff until we /// succeed in establishing a connection. /// This method masks ConnectionBroken errors, but does not mask other network errors. /// Returns the remote peer's block height on success pub fn connect_handshake_backoff(&mut self) -> Result<u64, btc_error> { let mut backoff: f64 = 1.0; let mut rng = thread_rng(); loop { let connection_result = self.connect(); match connection_result { Ok(()) => { // connected! now do the handshake let handshake_result = self.peer_handshake(); match handshake_result { Ok(block_height) => { // connected! return Ok(block_height); } Err(btc_error::ConnectionBroken) => { // need to try again backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } Err(e) => { // propagate other network error warn!( "Failed to handshake with {}:{}: {:?}", &self.config.peer_host, self.config.peer_port, &e ); return Err(e); } } } Err(err_msg) => { error!( "Failed to connect to peer {}:{}: {}", &self.config.peer_host, self.config.peer_port, err_msg ); backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } } // don't sleep more than 60 seconds if backoff > 60.0 { backoff = 60.0; } if backoff > 10.0 { warn!("Connection broken; retrying in {} sec...", backoff); } let duration = time::Duration::from_millis((backoff * 1_000.0) as u64); thread::sleep(duration); } } /// Send a Version message pub fn send_version(&mut self) -> Result<(), btc_error> { let timestamp = match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(dur) => dur, Err(err) => err.duration(), } .as_secs() as i64; let local_addr = self.get_local_sockaddr()?; let remote_addr = self.get_remote_sockaddr()?; let sender_address = btc_network_address::Address::new(&local_addr, 0); let remote_address = btc_network_address::Address::new(&remote_addr, 0); let payload = btc_message_network::VersionMessage { version: btc_constants::PROTOCOL_VERSION, services: 0, timestamp: timestamp, receiver: remote_address, sender: sender_address, nonce: self.runtime.version_nonce, user_agent: self.runtime.user_agent.to_owned(), start_height: 0, relay: false, }; debug!( "Send version (nonce={}) to {}:{}", self.runtime.version_nonce, self.config.peer_host, self.config.peer_port ); self.send_message(btc_message::NetworkMessage::Version(payload)) } /// Receive a Version message and reply with a Verack pub fn handle_version(&mut self, version_message: PeerMessage) -> Result<(), btc_error> { 
match version_message { btc_message::NetworkMessage::Version(msg_body) => { debug!( "Handle version -- remote peer blockchain height is {}", msg_body.start_height ); self.runtime.block_height = msg_body.start_height as u64; return self.send_verack(); } _ => { error!("Did not receive version, but got {:?}", version_message); } }; return Err(btc_error::InvalidMessage(version_message)); } /// Send a verack pub fn send_verack(&mut self) -> Result<(), btc_error> { let payload = btc_message::NetworkMessage::Verack; debug!("Send verack"); self.send_message(payload) } /// Handle a verack we received. /// Does nothing. pub fn handle_verack(&mut self, verack_message: PeerMessage) -> Result<(), btc_error> { match verack_message { btc_message::NetworkMessage::Verack => { debug!("Handle verack"); return Ok(()); } _ => { error!("Did not receive verack, but got {:?}", verack_message); } }; Err(btc_error::InvalidMessage(verack_message)) } /// Respond to a Ping message by sending a Pong message pub fn handle_ping(&mut self, ping_message: PeerMessage) -> Result<(), btc_error> { match ping_message { btc_message::NetworkMessage::Ping(ref n) => { debug!("Handle ping {}", n); let payload = btc_message::NetworkMessage::Pong(*n); debug!("Send pong {}", n); return self.send_message(payload); } _ => { error!("Did not receive ping, but got {:?}", ping_message); } }; Err(btc_error::InvalidMessage(ping_message)) } /// Respond to a Pong message. /// Does nothing. pub fn handle_pong(&mut self, pong_message: PeerMessage) -> Result<(), btc_error> { match pong_message { btc_message::NetworkMessage::Pong(n) => { debug!("Handle pong {}", n); return Ok(()); } _ => { error!("Did not receive pong, but got {:?}", pong_message); } }; Err(btc_error::InvalidReply) } /// Send a GetHeaders message /// Note that this isn't a generic GetHeaders message -- you should use this only to ask /// for a batch of 2,000 block hashes after this given hash. pub fn send_getheaders(&mut self, prev_block_hash: Sha256dHash) -> Result<(), btc_error> { let getheaders = btc_message_blockdata::GetHeadersMessage::new(vec![prev_block_hash], prev_block_hash); let payload = btc_message::NetworkMessage::GetHeaders(getheaders); debug!( "Send GetHeaders {} for 2000 headers to {}:{}", prev_block_hash.be_hex_string(), self.config.peer_host, self.config.peer_port ); self.runtime.last_getheaders_send_time = get_epoch_time_secs(); self.send_message(payload) } /// Send a GetData message pub fn send_getdata(&mut self, block_hashes: &Vec<Sha256dHash>) -> Result<(), btc_error> { assert!(block_hashes.len() > 0); let getdata_invs = block_hashes .iter() .map(|h| btc_message_blockdata::Inventory { inv_type: btc_message_blockdata::InvType::Block, hash: h.clone(), }) .collect(); let getdata = btc_message::NetworkMessage::GetData(getdata_invs); self.runtime.last_getdata_send_time = get_epoch_time_secs(); debug!( "Send GetData {}-{} to {}:{}", block_hashes[0].be_hex_string(), block_hashes[block_hashes.len() - 1].be_hex_string(), self.config.peer_host, self.config.peer_port ); self.send_message(getdata) } }
{ return self.handle_version(message).and_then(|_r| Ok(true)); }
conditional_block
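`handle_message` above answers protocol-level messages (version, verack, ping, pong) itself and defers anything else to an optional custom handler. The toy dispatcher below mirrors that shape; the `Message` enum and `Peer` struct are stand-ins, not the rust-bitcoin or stacks types.

```rust
// Stand-in for btc_message::NetworkMessage; only the variants the
// dispatcher cares about are modelled.
#[derive(Debug)]
enum Message {
    Version(u64), // remote block height
    Verack,
    Ping(u64),
    Pong(u64),
    Other(String),
}

#[derive(Debug)]
enum HandleError {
    Unhandled(Message),
}

trait CustomHandler {
    fn handle(&mut self, message: Message) -> Result<bool, HandleError>;
}

struct Peer {
    block_height: u64,
    outbox: Vec<Message>,
}

impl Peer {
    // Protocol messages are consumed here; anything else goes to the
    // optional handler, mirroring the match in handle_message above.
    fn handle_message<H: CustomHandler>(
        &mut self,
        message: Message,
        handler: Option<&mut H>,
    ) -> Result<bool, HandleError> {
        match message {
            Message::Version(height) => {
                self.block_height = height;
                self.outbox.push(Message::Verack);
                Ok(true)
            }
            Message::Verack => Ok(true),
            Message::Ping(n) => {
                self.outbox.push(Message::Pong(n));
                Ok(true)
            }
            Message::Pong(_) => Ok(true),
            other => match handler {
                Some(h) => h.handle(other),
                None => Err(HandleError::Unhandled(other)),
            },
        }
    }
}

struct Ignore;
impl CustomHandler for Ignore {
    fn handle(&mut self, _message: Message) -> Result<bool, HandleError> {
        Ok(false)
    }
}

fn main() {
    let mut peer = Peer { block_height: 0, outbox: Vec::new() };
    peer.handle_message::<Ignore>(Message::Version(700_000), None).unwrap();
    peer.handle_message::<Ignore>(Message::Ping(7), None).unwrap();
    assert_eq!(peer.block_height, 700_000);
    assert_eq!(peer.outbox.len(), 2); // a Verack and a Pong were queued
    let mut ignore = Ignore;
    let handled = peer.handle_message(Message::Other("headers".into()), Some(&mut ignore)).unwrap();
    assert!(!handled);
}
```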
network.rs
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use std::io; use std::io::Write; use std::net::SocketAddr; use std::ops::Deref; use std::thread; use std::time; use std::time::{SystemTime, UNIX_EPOCH}; use rand::{thread_rng, Rng}; use deps::bitcoin::network::address as btc_network_address; use deps::bitcoin::network::constants as btc_constants; use deps::bitcoin::network::encodable::{ConsensusDecodable, ConsensusEncodable}; use deps::bitcoin::network::message as btc_message; use deps::bitcoin::network::message_blockdata as btc_message_blockdata; use deps::bitcoin::network::message_network as btc_message_network; use deps::bitcoin::network::serialize as btc_serialize; use deps::bitcoin::network::serialize::{RawDecoder, RawEncoder}; use deps::bitcoin::util::hash::Sha256dHash; use burnchains::bitcoin::indexer::{network_id_to_bytes, BitcoinIndexer}; use burnchains::bitcoin::messages::BitcoinMessageHandler; use burnchains::bitcoin::Error as btc_error; use burnchains::bitcoin::PeerMessage; use burnchains::indexer::BurnchainIndexer; use util::get_epoch_time_secs; use util::log; // Based on Andrew Poelstra's rust-bitcoin library. impl BitcoinIndexer { /// Send a Bitcoin protocol message on the wire pub fn send_message(&mut self, payload: btc_message::NetworkMessage) -> Result<(), btc_error> { let message = btc_message::RawNetworkMessage { magic: network_id_to_bytes(self.runtime.network_id), payload: payload, }; self.with_socket(|ref mut sock| { message .consensus_encode(&mut RawEncoder::new(&mut *sock)) .map_err(btc_error::SerializationError)?; sock.flush().map_err(btc_error::Io) }) } /// Receive a Bitcoin protocol message on the wire /// If this method returns Err(ConnectionBroken), then the caller should attempt to re-connect. 
pub fn recv_message(&mut self) -> Result<PeerMessage, btc_error> { let magic = network_id_to_bytes(self.runtime.network_id); self.with_socket(|ref mut sock| { // read the message off the wire let mut decoder = RawDecoder::new(sock); let decoded: btc_message::RawNetworkMessage = ConsensusDecodable::consensus_decode(&mut decoder).map_err(|e| { // if we can't finish a recv(), then report that the connection is broken match e { btc_serialize::Error::Io(ref io_error) => { if io_error.kind() == io::ErrorKind::UnexpectedEof { btc_error::ConnectionBroken } else { btc_error::Io(io::Error::new( io_error.kind(), "I/O error when processing message", )) } } _ => btc_error::SerializationError(e), } })?; // sanity check -- must match our network if decoded.magic!= magic { return Err(btc_error::InvalidMagic); } Ok(decoded.payload) }) } /// Get sender address from our socket pub fn get_local_sockaddr(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.local_addr().map_err(btc_error::Io)) } /// Get receiver address from our socket pub fn get_remote_sockaddr(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.peer_addr().map_err(btc_error::Io)) } /// Handle and consume message we received, if we can. /// Returns UnhandledMessage if we can't handle the given message. pub fn handle_message<T: BitcoinMessageHandler>( &mut self, message: PeerMessage, handler: Option<&mut T>, ) -> Result<bool, btc_error> { if self.runtime.last_getdata_send_time > 0 && self.runtime.last_getdata_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for block data. Killing connection."); return Err(btc_error::TimedOut); } if self.runtime.last_getheaders_send_time > 0 && self.runtime.last_getheaders_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for headers data. Killing connection."); return Err(btc_error::TimedOut); } // classify the message here, so we can pass it along to the handler explicitly match message { btc_message::NetworkMessage::Version(..) => { return self.handle_version(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Verack => { return self.handle_verack(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Ping(..) => { return self.handle_ping(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Pong(..) => { return self.handle_pong(message).and_then(|_r| Ok(true)); } _ => match handler { Some(custom_handler) => custom_handler.handle_message(self, message.clone()), None => Err(btc_error::UnhandledMessage(message.clone())), }, } } /// Do the initial handshake to the remote peer. /// Returns the remote peer's block height pub fn peer_handshake(&mut self) -> Result<u64, btc_error> { debug!( "Begin peer handshake to {}:{}", self.config.peer_host, self.config.peer_port ); self.send_version()?; let version_reply = self.recv_message()?; self.handle_version(version_reply)?; let verack_reply = self.recv_message()?; self.handle_verack(verack_reply)?; debug!( "Established connection to {}:{}, who has {} blocks", self.config.peer_host, self.config.peer_port, self.runtime.block_height ); Ok(self.runtime.block_height) } /// Connect to a remote peer, do a handshake with the remote peer, and use exponential backoff until we /// succeed in establishing a connection. /// This method masks ConnectionBroken errors, but does not mask other network errors. 
/// Returns the remote peer's block height on success pub fn connect_handshake_backoff(&mut self) -> Result<u64, btc_error> { let mut backoff: f64 = 1.0; let mut rng = thread_rng(); loop { let connection_result = self.connect(); match connection_result { Ok(()) => { // connected! now do the handshake let handshake_result = self.peer_handshake(); match handshake_result { Ok(block_height) => { // connected! return Ok(block_height); } Err(btc_error::ConnectionBroken) => { // need to try again
// propagate other network error warn!( "Failed to handshake with {}:{}: {:?}", &self.config.peer_host, self.config.peer_port, &e ); return Err(e); } } } Err(err_msg) => { error!( "Failed to connect to peer {}:{}: {}", &self.config.peer_host, self.config.peer_port, err_msg ); backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } } // don't sleep more than 60 seconds if backoff > 60.0 { backoff = 60.0; } if backoff > 10.0 { warn!("Connection broken; retrying in {} sec...", backoff); } let duration = time::Duration::from_millis((backoff * 1_000.0) as u64); thread::sleep(duration); } } /// Send a Version message pub fn send_version(&mut self) -> Result<(), btc_error> { let timestamp = match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(dur) => dur, Err(err) => err.duration(), } .as_secs() as i64; let local_addr = self.get_local_sockaddr()?; let remote_addr = self.get_remote_sockaddr()?; let sender_address = btc_network_address::Address::new(&local_addr, 0); let remote_address = btc_network_address::Address::new(&remote_addr, 0); let payload = btc_message_network::VersionMessage { version: btc_constants::PROTOCOL_VERSION, services: 0, timestamp: timestamp, receiver: remote_address, sender: sender_address, nonce: self.runtime.version_nonce, user_agent: self.runtime.user_agent.to_owned(), start_height: 0, relay: false, }; debug!( "Send version (nonce={}) to {}:{}", self.runtime.version_nonce, self.config.peer_host, self.config.peer_port ); self.send_message(btc_message::NetworkMessage::Version(payload)) } /// Receive a Version message and reply with a Verack pub fn handle_version(&mut self, version_message: PeerMessage) -> Result<(), btc_error> { match version_message { btc_message::NetworkMessage::Version(msg_body) => { debug!( "Handle version -- remote peer blockchain height is {}", msg_body.start_height ); self.runtime.block_height = msg_body.start_height as u64; return self.send_verack(); } _ => { error!("Did not receive version, but got {:?}", version_message); } }; return Err(btc_error::InvalidMessage(version_message)); } /// Send a verack pub fn send_verack(&mut self) -> Result<(), btc_error> { let payload = btc_message::NetworkMessage::Verack; debug!("Send verack"); self.send_message(payload) } /// Handle a verack we received. /// Does nothing. pub fn handle_verack(&mut self, verack_message: PeerMessage) -> Result<(), btc_error> { match verack_message { btc_message::NetworkMessage::Verack => { debug!("Handle verack"); return Ok(()); } _ => { error!("Did not receive verack, but got {:?}", verack_message); } }; Err(btc_error::InvalidMessage(verack_message)) } /// Respond to a Ping message by sending a Pong message pub fn handle_ping(&mut self, ping_message: PeerMessage) -> Result<(), btc_error> { match ping_message { btc_message::NetworkMessage::Ping(ref n) => { debug!("Handle ping {}", n); let payload = btc_message::NetworkMessage::Pong(*n); debug!("Send pong {}", n); return self.send_message(payload); } _ => { error!("Did not receive ping, but got {:?}", ping_message); } }; Err(btc_error::InvalidMessage(ping_message)) } /// Respond to a Pong message. /// Does nothing. 
pub fn handle_pong(&mut self, pong_message: PeerMessage) -> Result<(), btc_error> { match pong_message { btc_message::NetworkMessage::Pong(n) => { debug!("Handle pong {}", n); return Ok(()); } _ => { error!("Did not receive pong, but got {:?}", pong_message); } }; Err(btc_error::InvalidReply) } /// Send a GetHeaders message /// Note that this isn't a generic GetHeaders message -- you should use this only to ask /// for a batch of 2,000 block hashes after this given hash. pub fn send_getheaders(&mut self, prev_block_hash: Sha256dHash) -> Result<(), btc_error> { let getheaders = btc_message_blockdata::GetHeadersMessage::new(vec![prev_block_hash], prev_block_hash); let payload = btc_message::NetworkMessage::GetHeaders(getheaders); debug!( "Send GetHeaders {} for 2000 headers to {}:{}", prev_block_hash.be_hex_string(), self.config.peer_host, self.config.peer_port ); self.runtime.last_getheaders_send_time = get_epoch_time_secs(); self.send_message(payload) } /// Send a GetData message pub fn send_getdata(&mut self, block_hashes: &Vec<Sha256dHash>) -> Result<(), btc_error> { assert!(block_hashes.len() > 0); let getdata_invs = block_hashes .iter() .map(|h| btc_message_blockdata::Inventory { inv_type: btc_message_blockdata::InvType::Block, hash: h.clone(), }) .collect(); let getdata = btc_message::NetworkMessage::GetData(getdata_invs); self.runtime.last_getdata_send_time = get_epoch_time_secs(); debug!( "Send GetData {}-{} to {}:{}", block_hashes[0].be_hex_string(), block_hashes[block_hashes.len() - 1].be_hex_string(), self.config.peer_host, self.config.peer_port ); self.send_message(getdata) } }
backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } Err(e) => {
random_line_split
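`recv_message` above rejects any frame whose magic bytes do not match the configured network, which cheaply catches cross-network connections and garbled streams before any further parsing. A minimal standalone check of the same guard; the mainnet/testnet constants are the well-known Bitcoin wire magics, shown here only for illustration.

```rust
// Wire-format network magics as they appear on the wire;
// mainnet and testnet3 values shown for illustration.
const MAINNET_MAGIC: [u8; 4] = [0xf9, 0xbe, 0xb4, 0xd9];
const TESTNET_MAGIC: [u8; 4] = [0x0b, 0x11, 0x09, 0x07];

#[derive(Debug, PartialEq)]
enum FrameError {
    TooShort,
    InvalidMagic,
}

// Check the 4-byte magic prefix of a raw frame before decoding anything
// else, mirroring the `decoded.magic != magic` guard above.
fn check_magic(frame: &[u8], expected: &[u8; 4]) -> Result<(), FrameError> {
    let prefix = frame.get(..4).ok_or(FrameError::TooShort)?;
    if prefix == &expected[..] {
        Ok(())
    } else {
        Err(FrameError::InvalidMagic)
    }
}

fn main() {
    let mut frame = Vec::from(MAINNET_MAGIC);
    frame.extend_from_slice(b"version.....");
    assert_eq!(check_magic(&frame, &MAINNET_MAGIC), Ok(()));
    // A peer speaking for the wrong network is rejected immediately.
    assert_eq!(check_magic(&frame, &TESTNET_MAGIC), Err(FrameError::InvalidMagic));
}
```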
network.rs
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use std::io; use std::io::Write; use std::net::SocketAddr; use std::ops::Deref; use std::thread; use std::time; use std::time::{SystemTime, UNIX_EPOCH}; use rand::{thread_rng, Rng}; use deps::bitcoin::network::address as btc_network_address; use deps::bitcoin::network::constants as btc_constants; use deps::bitcoin::network::encodable::{ConsensusDecodable, ConsensusEncodable}; use deps::bitcoin::network::message as btc_message; use deps::bitcoin::network::message_blockdata as btc_message_blockdata; use deps::bitcoin::network::message_network as btc_message_network; use deps::bitcoin::network::serialize as btc_serialize; use deps::bitcoin::network::serialize::{RawDecoder, RawEncoder}; use deps::bitcoin::util::hash::Sha256dHash; use burnchains::bitcoin::indexer::{network_id_to_bytes, BitcoinIndexer}; use burnchains::bitcoin::messages::BitcoinMessageHandler; use burnchains::bitcoin::Error as btc_error; use burnchains::bitcoin::PeerMessage; use burnchains::indexer::BurnchainIndexer; use util::get_epoch_time_secs; use util::log; // Based on Andrew Poelstra's rust-bitcoin library. impl BitcoinIndexer { /// Send a Bitcoin protocol message on the wire pub fn send_message(&mut self, payload: btc_message::NetworkMessage) -> Result<(), btc_error> { let message = btc_message::RawNetworkMessage { magic: network_id_to_bytes(self.runtime.network_id), payload: payload, }; self.with_socket(|ref mut sock| { message .consensus_encode(&mut RawEncoder::new(&mut *sock)) .map_err(btc_error::SerializationError)?; sock.flush().map_err(btc_error::Io) }) } /// Receive a Bitcoin protocol message on the wire /// If this method returns Err(ConnectionBroken), then the caller should attempt to re-connect. pub fn recv_message(&mut self) -> Result<PeerMessage, btc_error> { let magic = network_id_to_bytes(self.runtime.network_id); self.with_socket(|ref mut sock| { // read the message off the wire let mut decoder = RawDecoder::new(sock); let decoded: btc_message::RawNetworkMessage = ConsensusDecodable::consensus_decode(&mut decoder).map_err(|e| { // if we can't finish a recv(), then report that the connection is broken match e { btc_serialize::Error::Io(ref io_error) => { if io_error.kind() == io::ErrorKind::UnexpectedEof { btc_error::ConnectionBroken } else { btc_error::Io(io::Error::new( io_error.kind(), "I/O error when processing message", )) } } _ => btc_error::SerializationError(e), } })?; // sanity check -- must match our network if decoded.magic!= magic { return Err(btc_error::InvalidMagic); } Ok(decoded.payload) }) } /// Get sender address from our socket pub fn get_local_sockaddr(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.local_addr().map_err(btc_error::Io)) } /// Get receiver address from our socket pub fn
(&mut self) -> Result<SocketAddr, btc_error> { self.with_socket(|ref mut sock| sock.peer_addr().map_err(btc_error::Io)) } /// Handle and consume message we received, if we can. /// Returns UnhandledMessage if we can't handle the given message. pub fn handle_message<T: BitcoinMessageHandler>( &mut self, message: PeerMessage, handler: Option<&mut T>, ) -> Result<bool, btc_error> { if self.runtime.last_getdata_send_time > 0 && self.runtime.last_getdata_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for block data. Killing connection."); return Err(btc_error::TimedOut); } if self.runtime.last_getheaders_send_time > 0 && self.runtime.last_getheaders_send_time + self.runtime.timeout < get_epoch_time_secs() { warn!("Timed out waiting for headers data. Killing connection."); return Err(btc_error::TimedOut); } // classify the message here, so we can pass it along to the handler explicitly match message { btc_message::NetworkMessage::Version(..) => { return self.handle_version(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Verack => { return self.handle_verack(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Ping(..) => { return self.handle_ping(message).and_then(|_r| Ok(true)); } btc_message::NetworkMessage::Pong(..) => { return self.handle_pong(message).and_then(|_r| Ok(true)); } _ => match handler { Some(custom_handler) => custom_handler.handle_message(self, message.clone()), None => Err(btc_error::UnhandledMessage(message.clone())), }, } } /// Do the initial handshake to the remote peer. /// Returns the remote peer's block height pub fn peer_handshake(&mut self) -> Result<u64, btc_error> { debug!( "Begin peer handshake to {}:{}", self.config.peer_host, self.config.peer_port ); self.send_version()?; let version_reply = self.recv_message()?; self.handle_version(version_reply)?; let verack_reply = self.recv_message()?; self.handle_verack(verack_reply)?; debug!( "Established connection to {}:{}, who has {} blocks", self.config.peer_host, self.config.peer_port, self.runtime.block_height ); Ok(self.runtime.block_height) } /// Connect to a remote peer, do a handshake with the remote peer, and use exponential backoff until we /// succeed in establishing a connection. /// This method masks ConnectionBroken errors, but does not mask other network errors. /// Returns the remote peer's block height on success pub fn connect_handshake_backoff(&mut self) -> Result<u64, btc_error> { let mut backoff: f64 = 1.0; let mut rng = thread_rng(); loop { let connection_result = self.connect(); match connection_result { Ok(()) => { // connected! now do the handshake let handshake_result = self.peer_handshake(); match handshake_result { Ok(block_height) => { // connected! 
return Ok(block_height); } Err(btc_error::ConnectionBroken) => { // need to try again backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } Err(e) => { // propagate other network error warn!( "Failed to handshake with {}:{}: {:?}", &self.config.peer_host, self.config.peer_port, &e ); return Err(e); } } } Err(err_msg) => { error!( "Failed to connect to peer {}:{}: {}", &self.config.peer_host, self.config.peer_port, err_msg ); backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0, 1.0)); } } // don't sleep more than 60 seconds if backoff > 60.0 { backoff = 60.0; } if backoff > 10.0 { warn!("Connection broken; retrying in {} sec...", backoff); } let duration = time::Duration::from_millis((backoff * 1_000.0) as u64); thread::sleep(duration); } } /// Send a Version message pub fn send_version(&mut self) -> Result<(), btc_error> { let timestamp = match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(dur) => dur, Err(err) => err.duration(), } .as_secs() as i64; let local_addr = self.get_local_sockaddr()?; let remote_addr = self.get_remote_sockaddr()?; let sender_address = btc_network_address::Address::new(&local_addr, 0); let remote_address = btc_network_address::Address::new(&remote_addr, 0); let payload = btc_message_network::VersionMessage { version: btc_constants::PROTOCOL_VERSION, services: 0, timestamp: timestamp, receiver: remote_address, sender: sender_address, nonce: self.runtime.version_nonce, user_agent: self.runtime.user_agent.to_owned(), start_height: 0, relay: false, }; debug!( "Send version (nonce={}) to {}:{}", self.runtime.version_nonce, self.config.peer_host, self.config.peer_port ); self.send_message(btc_message::NetworkMessage::Version(payload)) } /// Receive a Version message and reply with a Verack pub fn handle_version(&mut self, version_message: PeerMessage) -> Result<(), btc_error> { match version_message { btc_message::NetworkMessage::Version(msg_body) => { debug!( "Handle version -- remote peer blockchain height is {}", msg_body.start_height ); self.runtime.block_height = msg_body.start_height as u64; return self.send_verack(); } _ => { error!("Did not receive version, but got {:?}", version_message); } }; return Err(btc_error::InvalidMessage(version_message)); } /// Send a verack pub fn send_verack(&mut self) -> Result<(), btc_error> { let payload = btc_message::NetworkMessage::Verack; debug!("Send verack"); self.send_message(payload) } /// Handle a verack we received. /// Does nothing. pub fn handle_verack(&mut self, verack_message: PeerMessage) -> Result<(), btc_error> { match verack_message { btc_message::NetworkMessage::Verack => { debug!("Handle verack"); return Ok(()); } _ => { error!("Did not receive verack, but got {:?}", verack_message); } }; Err(btc_error::InvalidMessage(verack_message)) } /// Respond to a Ping message by sending a Pong message pub fn handle_ping(&mut self, ping_message: PeerMessage) -> Result<(), btc_error> { match ping_message { btc_message::NetworkMessage::Ping(ref n) => { debug!("Handle ping {}", n); let payload = btc_message::NetworkMessage::Pong(*n); debug!("Send pong {}", n); return self.send_message(payload); } _ => { error!("Did not receive ping, but got {:?}", ping_message); } }; Err(btc_error::InvalidMessage(ping_message)) } /// Respond to a Pong message. /// Does nothing. 
pub fn handle_pong(&mut self, pong_message: PeerMessage) -> Result<(), btc_error> { match pong_message { btc_message::NetworkMessage::Pong(n) => { debug!("Handle pong {}", n); return Ok(()); } _ => { error!("Did not receive pong, but got {:?}", pong_message); } }; Err(btc_error::InvalidReply) } /// Send a GetHeaders message /// Note that this isn't a generic GetHeaders message -- you should use this only to ask /// for a batch of 2,000 block hashes after this given hash. pub fn send_getheaders(&mut self, prev_block_hash: Sha256dHash) -> Result<(), btc_error> { let getheaders = btc_message_blockdata::GetHeadersMessage::new(vec![prev_block_hash], prev_block_hash); let payload = btc_message::NetworkMessage::GetHeaders(getheaders); debug!( "Send GetHeaders {} for 2000 headers to {}:{}", prev_block_hash.be_hex_string(), self.config.peer_host, self.config.peer_port ); self.runtime.last_getheaders_send_time = get_epoch_time_secs(); self.send_message(payload) } /// Send a GetData message pub fn send_getdata(&mut self, block_hashes: &Vec<Sha256dHash>) -> Result<(), btc_error> { assert!(block_hashes.len() > 0); let getdata_invs = block_hashes .iter() .map(|h| btc_message_blockdata::Inventory { inv_type: btc_message_blockdata::InvType::Block, hash: h.clone(), }) .collect(); let getdata = btc_message::NetworkMessage::GetData(getdata_invs); self.runtime.last_getdata_send_time = get_epoch_time_secs(); debug!( "Send GetData {}-{} to {}:{}", block_hashes[0].be_hex_string(), block_hashes[block_hashes.len() - 1].be_hex_string(), self.config.peer_host, self.config.peer_port ); self.send_message(getdata) } }
get_remote_sockaddr
identifier_name
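For readers skimming the `network.rs` sample above: `connect_handshake_backoff` retries failed connections with a delay that is doubled, jittered, and capped at 60 seconds between attempts. The sketch below isolates just that schedule; it is illustrative only, the function names are made up, and it assumes the same pre-1.0-style `rand` API (`gen_range(low, high)`) that the sample itself uses.

```rust
use std::{thread, time};

use rand::{thread_rng, Rng};

/// Grow the retry delay the way the indexer does: double it, add random jitter,
/// and never let it exceed 60 seconds.
fn next_backoff(backoff: f64) -> f64 {
    let mut rng = thread_rng();
    let grown = 2.0 * backoff + backoff * rng.gen_range(0.0, 1.0);
    if grown > 60.0 {
        60.0
    } else {
        grown
    }
}

/// Sleep out the current delay, expressed in (fractional) seconds.
fn wait_out(backoff: f64) {
    thread::sleep(time::Duration::from_millis((backoff * 1_000.0) as u64));
}
```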
macros.rs
#[macro_export] macro_rules! implement_lua_push { ($ty:ty, $cb:expr) => { impl<L> $crate::Push<L> for $ty where L: $crate::AsMutLua { fn push_to_lua(self, lua: L) -> $crate::PushGuard<L> { $crate::userdata::push_userdata(self, lua, $cb) } } }; } #[macro_export] macro_rules! implement_lua_read { ($ty:ty) => { impl<'s, 'c> hlua::LuaRead<&'c mut hlua::InsideCallback> for &'s mut $ty { fn lua_read_at_position(lua: &'c mut hlua::InsideCallback, index: i32) -> Result<&'s mut $ty, &'c mut hlua::InsideCallback> { // FIXME: unsafe { ::std::mem::transmute($crate::userdata::read_userdata::<$ty>(lua, index)) } } } impl<'s, 'c> hlua::LuaRead<&'c mut hlua::InsideCallback> for &'s $ty { fn lua_read_at_position(lua: &'c mut hlua::InsideCallback, index: i32) -> Result<&'s $ty, &'c mut hlua::InsideCallback> { // FIXME:
unsafe { ::std::mem::transmute($crate::userdata::read_userdata::<$ty>(lua, index)) } } } }; }
random_line_split
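A hypothetical call site for the two macros above (assuming an application-defined `Sound` struct and the surrounding `hlua` setup; the no-op metatable callback is only a placeholder, not the crate's documented signature):

```rust
struct Sound {
    volume: f32,
}

// Generate the Push and LuaRead impls for Sound. The second macro argument is the
// userdata callback forwarded to push_userdata; a no-op closure stands in for it here.
implement_lua_push!(Sound, |_| {});
implement_lua_read!(Sound);
```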
p137.rs
//! [Problem 137](https://projecteuler.net/problem=137) solver. //! //! ```math //! A_F(x) = x F_1 + x^2 F_2 + x^3 F_3 + \dots //! x A_F(x) = x^2 F_1 + x^3 F_2 + x^4 F_3 + \dots //! x^2 A_F(x) = x^3 F_1 + x^4 F_2 + x^5 F_3 + \dots //! (1 - x - x^2) A_F(x) = x F_1 + x^2 (F_2 - F_1) + x^3 (F_3 - F_2 - F_1) + \dots //! ``` //! //! Since `F_k = F_{k-1} + F_{k-2}` and `F_1 = F_2 = 1`, //! //! ```math //! (1 - x - x^2) A_F(x) = x //! ``` //! //! Because `A_F(x)` is a positive integer, putting `n := A_F(x) > 0` gives the following quadratic equation. //! //! ```math //! n x^2 + (n + 1) x - n = 0 //! ``` //! //! This equation has a rational solution exactly when the discriminant `D` is a perfect square, that is, when it can be written as follows for some integer `m`. //! //! ```math //! D = (n+1)^2 + 4n^2 = m^2 //! (5n+1)^2 - 5m^2 = -4 //! ``` //! //! This is a Pell equation, so it suffices to enumerate its solutions. //! //! Putting `p := 5n + 1` and `q := m`, with //! `p_0 = 1`, `q_0 = 1`, the solutions satisfy //! ```math //! p_{k+1} = \frac{3p_k + 5q_k}{2} //! q_{k+1} = \frac{p_k + 3q_k}{2} //! ``` //! //! and this is the general solution. #![warn(bad_style, unused, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results)] #[macro_use(problem)] extern crate common; extern crate itertools; use itertools::Unfold; fn compute(i: usize) -> u64 { Unfold::new((1, 1), |state| { let next = ((3 * state.0 + 5 * state.1) / 2, (state.0 + 3 * state.1) / 2); *state = next; Some(next) }).filter_map(|(p, q)| { if p % 5 == 1 { Some((p / 5, q)) } else { None } }).nth(i).unwrap().0 } fn solve() -> String { compute(14).to_string() } problem!("1120149658760", solve); #[cfg(test)] mod tests { #[test] fn tenth_sol() { assert_eq!(74049690, super::compute(9)); } }
identifier_body
p137.rs
//! [Problem 137](https://projecteuler.net/problem=137) solver. //! //! ```math //! A_F(x) = x F_1 + x^2 F_2 + x^3 F_3 + \dots //! x A_F(x) = x^2 F_1 + x^3 F_2 + x^4 F_3 + \dots //! x^2 A_F(x) = x^3 F_1 + x^4 F_2 + x^5 F_3 + \dots //! (1 - x - x^2) A_F(x) = x F_1 + x^2 (F_2 - F_1) + x^3 (F_3 - F_2 - F_1) + \dots //! ``` //! //! Since `F_k = F_{k-1} + F_{k-2}` and `F_1 = F_2 = 1`, //! //! ```math //! (1 - x - x^2) A_F(x) = x //! ``` //! //! Because `A_F(x)` is a positive integer, putting `n := A_F(x) > 0` gives the following quadratic equation. //! //! ```math //! n x^2 + (n + 1) x - n = 0 //! ``` //! //! This equation has a rational solution exactly when the discriminant `D` is a perfect square, that is, when it can be written as follows for some integer `m`. //! //! ```math //! D = (n+1)^2 + 4n^2 = m^2 //! (5n+1)^2 - 5m^2 = -4 //! ``` //! //! This is a Pell equation, so it suffices to enumerate its solutions. //! //! Putting `p := 5n + 1` and `q := m`, with //! `p_0 = 1`, `q_0 = 1`, the solutions satisfy //! ```math //! p_{k+1} = \frac{3p_k + 5q_k}{2} //! q_{k+1} = \frac{p_k + 3q_k}{2} //! ``` //! //! and this is the general solution. #![warn(bad_style, unused, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results)] #[macro_use(problem)] extern crate common; extern crate itertools; use itertools::Unfold; fn compute(i: usize) -> u64 { Unfold::new((1, 1), |state| { let next = ((3 * state.0 + 5 * state.1) / 2, (state.0 + 3 * state.1) / 2); *state = next; Some(next) }).filter_map(|(p, q)| { if p % 5 == 1 { Some((p / 5, q)) } else { None } }).nth(i).unwrap().0 } fn solve() -> String { compute(14).to_string() } problem!("1120149658760", solve); #[cfg(test)] mod tests { #[test] fn tenth_sol() { assert_eq!(74049690, super::compute(
9)); } }
conditional_block
p137.rs
//! [Problem 137](https://projecteuler.net/problem=137) solver. //! //! ```math //! A_F(x) = x F_1 + x^2 F_2 + x^3 F_3 + \dots //! x A_F(x) = x^2 F_1 + x^3 F_2 + x^4 F_3 + \dots //! x^2 A_F(x) = x^3 F_1 + x^4 F_2 + x^5 F_3 + \dots //! (1 - x - x^2) A_F(x) = x F_1 + x^2 (F_2 - F_1) + x^3 (F_3 - F_2 - F_1) + \dots //! ``` //! //! Since `F_k = F_{k-1} + F_{k-2}` and `F_1 = F_2 = 1`, //! //! ```math //! (1 - x - x^2) A_F(x) = x //! ``` //! //! Because `A_F(x)` is a positive integer, putting `n := A_F(x) > 0` gives the following quadratic equation. //! //! ```math //! n x^2 + (n + 1) x - n = 0 //! ``` //! //! This equation has a rational solution exactly when the discriminant `D` is a perfect square, that is, when it can be written as follows for some integer `m`. //! //! ```math //! D = (n+1)^2 + 4n^2 = m^2 //! (5n+1)^2 - 5m^2 = -4 //! ``` //! //! This is a Pell equation, so it suffices to enumerate its solutions. //! //! Putting `p := 5n + 1` and `q := m`, with //! `p_0 = 1`, `q_0 = 1`, the solutions satisfy //! ```math //! p_{k+1} = \frac{3p_k + 5q_k}{2} //! q_{k+1} = \frac{p_k + 3q_k}{2} //! ``` //! //! and this is the general solution. #![warn(bad_style, unused, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results)] #[macro_use(problem)] extern crate common; extern crate itertools; use itertools::Unfold; fn compute(i: usize) -> u64 { Unfold::new((1, 1), |state| { let next = ((3 * state.0 + 5 * state.1) / 2, (state.0 + 3 * state.1) / 2); *state = next; Some(next)
} else { None } }).nth(i).unwrap().0 } fn solve() -> String { compute(14).to_string() } problem!("1120149658760", solve); #[cfg(test)] mod tests { #[test] fn tenth_sol() { assert_eq!(74049690, super::compute(9)); } }
}).filter_map(|(p, q)| { if p % 5 == 1 { Some((p / 5, q))
random_line_split
p137.rs
//! [Problem 137](https://projecteuler.net/problem=137) solver. //! //! ```math //! A_F(x) = x F_1 + x^2 F_2 + x^3 F_3 + \dots //! x A_F(x) = x^2 F_1 + x^3 F_2 + x^4 F_3 + \dots //! x^2 A_F(x) = x^3 F_1 + x^4 F_2 + x^5 F_3 + \dots //! (1 - x - x^2) A_F(x) = x F_1 + x^2 (F_2 - F_1) + x^3 (F_3 - F_2 - F_1) + \dots //! ``` //! //! Since `F_k = F_{k-1} + F_{k-2}` and `F_1 = F_2 = 1`, //! //! ```math //! (1 - x - x^2) A_F(x) = x //! ``` //! //! Because `A_F(x)` is a positive integer, putting `n := A_F(x) > 0` gives the following quadratic equation. //! //! ```math //! n x^2 + (n + 1) x - n = 0 //! ``` //! //! This equation has a rational solution exactly when the discriminant `D` is a perfect square, that is, when it can be written as follows for some integer `m`. //! //! ```math //! D = (n+1)^2 + 4n^2 = m^2 //! (5n+1)^2 - 5m^2 = -4 //! ``` //! //! This is a Pell equation, so it suffices to enumerate its solutions. //! //! Putting `p := 5n + 1` and `q := m`, with //! `p_0 = 1`, `q_0 = 1`, the solutions satisfy //! ```math //! p_{k+1} = \frac{3p_k + 5q_k}{2} //! q_{k+1} = \frac{p_k + 3q_k}{2} //! ``` //! //! and this is the general solution. #![warn(bad_style, unused, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results)] #[macro_use(problem)] extern crate common; extern crate itertools; use itertools::Unfold; fn compute(i: usize) -> u64 { Unfold::new((1, 1), |state| { let next = ((3 * state.0 + 5 * state.1) / 2, (state.0 + 3 * state.1) / 2); *state = next; Some(next) }).filter_map(|(p, q)| { if p % 5 == 1 { Some((p / 5, q)) } else { None } }).nth(i).unwrap().0 } fn solve() -> String { compute(14).to_string() } problem!("1120149658760", solve); #[cfg(test)] mod tests { #[test] fn tenth_sol() { assert_eq!(74049690, super::compute(9)); } }
identifier_name
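As an added sanity check on the derivation documented in `p137.rs` (this illustration is not part of the original file): the recurrence preserves the Pell invariant, so every iterate stays on the curve `p^2 - 5q^2 = -4`.

```math
p_{k+1}^2 - 5 q_{k+1}^2
  = \frac{(3 p_k + 5 q_k)^2 - 5 (p_k + 3 q_k)^2}{4}
  = \frac{4 p_k^2 - 20 q_k^2}{4}
  = p_k^2 - 5 q_k^2 = -4
```

Starting from `(p_0, q_0) = (1, 1)` the iteration produces `(4, 2)`, `(11, 5)`, `(29, 13)`, `(76, 34)`, and so on; keeping only the terms with `p ≡ 1 (mod 5)` gives `n = (p - 1) / 5 = 2, 15, 104, ...`, and the tenth such value is 74049690, in agreement with the `tenth_sol` test.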
time.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Specified time values. use cssparser::{Parser, Token}; use parser::{ParserContext, Parse}; use std::ascii::AsciiExt; use std::fmt; use style_traits::{ToCss, ParseError, StyleParseErrorKind}; use style_traits::values::specified::AllowedNumericType; use values::CSSFloat; use values::computed::{Context, ToComputedValue}; use values::computed::time::Time as ComputedTime; use values::specified::calc::CalcNode; /// A time value according to CSS-VALUES § 6.2. #[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq)] pub struct Time { seconds: CSSFloat, unit: TimeUnit, was_calc: bool, } /// A time unit. #[derive(Clone, Copy, Debug, Eq, MallocSizeOf, PartialEq)] pub enum TimeUnit { /// `s` Second, /// `ms` Millisecond, } impl Time { /// Returns a time value that represents `seconds` seconds. pub fn from_seconds(seconds: CSSFloat) -> Self { Time { seconds, unit: TimeUnit::Second, was_calc: false } } /// Returns `0s`. pub fn zero() -> Self { Self::from_seconds(0.0) } /// Returns the time in fractional seconds. pub fn seconds(self) -> CSSFloat { self.seconds } /// Parses a time according to CSS-VALUES § 6.2. pub fn parse_dimension( value: CSSFloat, unit: &str, was_calc: bool ) -> Result<Time, ()> { let (seconds, unit) = match_ignore_ascii_case! { unit, "s" => (value, TimeUnit::Second), "ms" => (value / 1000.0, TimeUnit::Millisecond), _ => return Err(()) }; Ok(Time { seconds, unit, was_calc }) } /// Returns a `Time` value from a CSS `calc()` expression.
was_calc: true, } } fn parse_with_clamping_mode<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, clamping_mode: AllowedNumericType ) -> Result<Self, ParseError<'i>> { use style_traits::ParsingMode; let location = input.current_source_location(); // FIXME: remove early returns when lifetimes are non-lexical match input.next() { // Note that we generally pass ParserContext to is_ok() to check // that the ParserMode of the ParserContext allows all numeric // values for SMIL regardless of clamping_mode, but in this Time // value case, the value does not animate for SMIL at all, so we use // ParsingMode::DEFAULT directly. Ok(&Token::Dimension { value, ref unit,.. }) if clamping_mode.is_ok(ParsingMode::DEFAULT, value) => { return Time::parse_dimension(value, unit, /* from_calc = */ false) .map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } Ok(&Token::Function(ref name)) if name.eq_ignore_ascii_case("calc") => {} Ok(t) => return Err(location.new_unexpected_token_error(t.clone())), Err(e) => return Err(e.into()) } match input.parse_nested_block(|i| CalcNode::parse_time(context, i)) { Ok(time) if clamping_mode.is_ok(ParsingMode::DEFAULT, time.seconds) => Ok(time), _ => Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)), } } /// Parses a non-negative time value. pub fn parse_non_negative<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<Self, ParseError<'i>> { Self::parse_with_clamping_mode(context, input, AllowedNumericType::NonNegative) } } impl ToComputedValue for Time { type ComputedValue = ComputedTime; fn to_computed_value(&self, _context: &Context) -> Self::ComputedValue { ComputedTime::from_seconds(self.seconds()) } fn from_computed_value(computed: &Self::ComputedValue) -> Self { Time { seconds: computed.seconds(), unit: TimeUnit::Second, was_calc: false, } } } impl Parse for Time { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { Self::parse_with_clamping_mode(context, input, AllowedNumericType::All) } } impl ToCss for Time { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write, { if self.was_calc { dest.write_str("calc(")?; } match self.unit { TimeUnit::Second => { self.seconds.to_css(dest)?; dest.write_str("s")?; } TimeUnit::Millisecond => { (self.seconds * 1000.).to_css(dest)?; dest.write_str("ms")?; } } if self.was_calc { dest.write_str(")")?; } Ok(()) } }
pub fn from_calc(seconds: CSSFloat) -> Self { Time { seconds: seconds, unit: TimeUnit::Second,
random_line_split
time.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Specified time values. use cssparser::{Parser, Token}; use parser::{ParserContext, Parse}; use std::ascii::AsciiExt; use std::fmt; use style_traits::{ToCss, ParseError, StyleParseErrorKind}; use style_traits::values::specified::AllowedNumericType; use values::CSSFloat; use values::computed::{Context, ToComputedValue}; use values::computed::time::Time as ComputedTime; use values::specified::calc::CalcNode; /// A time value according to CSS-VALUES § 6.2. #[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq)] pub struct Time { seconds: CSSFloat, unit: TimeUnit, was_calc: bool, } /// A time unit. #[derive(Clone, Copy, Debug, Eq, MallocSizeOf, PartialEq)] pub enum TimeUnit { /// `s` Second, /// `ms` Millisecond, } impl Time { /// Returns a time value that represents `seconds` seconds. pub fn from_seconds(seconds: CSSFloat) -> Self { Time { seconds, unit: TimeUnit::Second, was_calc: false } } /// Returns `0s`. pub fn zero() -> Self { Self::from_seconds(0.0) } /// Returns the time in fractional seconds. pub fn seconds(self) -> CSSFloat { self.seconds } /// Parses a time according to CSS-VALUES § 6.2. pub fn parse_dimension( value: CSSFloat, unit: &str, was_calc: bool ) -> Result<Time, ()> { let (seconds, unit) = match_ignore_ascii_case! { unit, "s" => (value, TimeUnit::Second), "ms" => (value / 1000.0, TimeUnit::Millisecond), _ => return Err(()) }; Ok(Time { seconds, unit, was_calc }) } /// Returns a `Time` value from a CSS `calc()` expression. pub fn from_calc(seconds: CSSFloat) -> Self { Time { seconds: seconds, unit: TimeUnit::Second, was_calc: true, } } fn pa
i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, clamping_mode: AllowedNumericType ) -> Result<Self, ParseError<'i>> { use style_traits::ParsingMode; let location = input.current_source_location(); // FIXME: remove early returns when lifetimes are non-lexical match input.next() { // Note that we generally pass ParserContext to is_ok() to check // that the ParserMode of the ParserContext allows all numeric // values for SMIL regardless of clamping_mode, but in this Time // value case, the value does not animate for SMIL at all, so we use // ParsingMode::DEFAULT directly. Ok(&Token::Dimension { value, ref unit,.. }) if clamping_mode.is_ok(ParsingMode::DEFAULT, value) => { return Time::parse_dimension(value, unit, /* from_calc = */ false) .map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } Ok(&Token::Function(ref name)) if name.eq_ignore_ascii_case("calc") => {} Ok(t) => return Err(location.new_unexpected_token_error(t.clone())), Err(e) => return Err(e.into()) } match input.parse_nested_block(|i| CalcNode::parse_time(context, i)) { Ok(time) if clamping_mode.is_ok(ParsingMode::DEFAULT, time.seconds) => Ok(time), _ => Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)), } } /// Parses a non-negative time value. pub fn parse_non_negative<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<Self, ParseError<'i>> { Self::parse_with_clamping_mode(context, input, AllowedNumericType::NonNegative) } } impl ToComputedValue for Time { type ComputedValue = ComputedTime; fn to_computed_value(&self, _context: &Context) -> Self::ComputedValue { ComputedTime::from_seconds(self.seconds()) } fn from_computed_value(computed: &Self::ComputedValue) -> Self { Time { seconds: computed.seconds(), unit: TimeUnit::Second, was_calc: false, } } } impl Parse for Time { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { Self::parse_with_clamping_mode(context, input, AllowedNumericType::All) } } impl ToCss for Time { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write, { if self.was_calc { dest.write_str("calc(")?; } match self.unit { TimeUnit::Second => { self.seconds.to_css(dest)?; dest.write_str("s")?; } TimeUnit::Millisecond => { (self.seconds * 1000.).to_css(dest)?; dest.write_str("ms")?; } } if self.was_calc { dest.write_str(")")?; } Ok(()) } }
rse_with_clamping_mode<'
identifier_name
time.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Specified time values. use cssparser::{Parser, Token}; use parser::{ParserContext, Parse}; use std::ascii::AsciiExt; use std::fmt; use style_traits::{ToCss, ParseError, StyleParseErrorKind}; use style_traits::values::specified::AllowedNumericType; use values::CSSFloat; use values::computed::{Context, ToComputedValue}; use values::computed::time::Time as ComputedTime; use values::specified::calc::CalcNode; /// A time value according to CSS-VALUES § 6.2. #[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq)] pub struct Time { seconds: CSSFloat, unit: TimeUnit, was_calc: bool, } /// A time unit. #[derive(Clone, Copy, Debug, Eq, MallocSizeOf, PartialEq)] pub enum TimeUnit { /// `s` Second, /// `ms` Millisecond, } impl Time { /// Returns a time value that represents `seconds` seconds. pub fn from_seconds(seconds: CSSFloat) -> Self { Time { seconds, unit: TimeUnit::Second, was_calc: false } } /// Returns `0s`. pub fn zero() -> Self { Self::from_seconds(0.0) } /// Returns the time in fractional seconds. pub fn seconds(self) -> CSSFloat { self.seconds } /// Parses a time according to CSS-VALUES § 6.2. pub fn parse_dimension( value: CSSFloat, unit: &str, was_calc: bool ) -> Result<Time, ()> { let (seconds, unit) = match_ignore_ascii_case! { unit, "s" => (value, TimeUnit::Second), "ms" => (value / 1000.0, TimeUnit::Millisecond), _ => return Err(()) }; Ok(Time { seconds, unit, was_calc }) } /// Returns a `Time` value from a CSS `calc()` expression. pub fn from_calc(seconds: CSSFloat) -> Self { Time { seconds: seconds, unit: TimeUnit::Second, was_calc: true, } } fn parse_with_clamping_mode<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, clamping_mode: AllowedNumericType ) -> Result<Self, ParseError<'i>> { use style_traits::ParsingMode; let location = input.current_source_location(); // FIXME: remove early returns when lifetimes are non-lexical match input.next() { // Note that we generally pass ParserContext to is_ok() to check // that the ParserMode of the ParserContext allows all numeric // values for SMIL regardless of clamping_mode, but in this Time // value case, the value does not animate for SMIL at all, so we use // ParsingMode::DEFAULT directly. Ok(&Token::Dimension { value, ref unit,.. }) if clamping_mode.is_ok(ParsingMode::DEFAULT, value) => { return Time::parse_dimension(value, unit, /* from_calc = */ false) .map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError)) } Ok(&Token::Function(ref name)) if name.eq_ignore_ascii_case("calc") => {} Ok(t) => return Err(location.new_unexpected_token_error(t.clone())), Err(e) => return Err(e.into()) } match input.parse_nested_block(|i| CalcNode::parse_time(context, i)) { Ok(time) if clamping_mode.is_ok(ParsingMode::DEFAULT, time.seconds) => Ok(time), _ => Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)), } } /// Parses a non-negative time value. pub fn parse_non_negative<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't> ) -> Result<Self, ParseError<'i>> { Self::parse_with_clamping_mode(context, input, AllowedNumericType::NonNegative) } } impl ToComputedValue for Time { type ComputedValue = ComputedTime; fn to_computed_value(&self, _context: &Context) -> Self::ComputedValue {
fn from_computed_value(computed: &Self::ComputedValue) -> Self { Time { seconds: computed.seconds(), unit: TimeUnit::Second, was_calc: false, } } } impl Parse for Time { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { Self::parse_with_clamping_mode(context, input, AllowedNumericType::All) } } impl ToCss for Time { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write, { if self.was_calc { dest.write_str("calc(")?; } match self.unit { TimeUnit::Second => { self.seconds.to_css(dest)?; dest.write_str("s")?; } TimeUnit::Millisecond => { (self.seconds * 1000.).to_css(dest)?; dest.write_str("ms")?; } } if self.was_calc { dest.write_str(")")?; } Ok(()) } }
ComputedTime::from_seconds(self.seconds()) }
identifier_body
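To make the unit handling in `Time::parse_dimension` above concrete, here is a small illustrative test (hypothetical; it assumes the `Time` type from this file is in scope): seconds are stored unchanged, milliseconds are divided by 1000, and unrecognized units are rejected.

```rust
#[test]
fn parse_dimension_units() {
    // "s" stores the value as-is.
    let two_seconds = Time::parse_dimension(2.0, "s", /* was_calc = */ false).unwrap();
    assert_eq!(two_seconds.seconds(), 2.0);

    // "ms" divides by 1000.0 before storing.
    let half_second = Time::parse_dimension(500.0, "ms", false).unwrap();
    assert_eq!(half_second.seconds(), 0.5);

    // Any other unit falls through to Err(()).
    assert!(Time::parse_dimension(1.0, "min", false).is_err());
}
```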
module.rs
use std::fmt; use libc; use llvm_sys::prelude::*; use llvm_sys::core as llvm; use super::*; use std::path::Path; error_chain! { errors { ModulePrintFile(t: String) { description("Error while printing module to file") display("Error while printing module to file: '{}'", t) } } } // No `Drop` impl is needed as this is disposed of when the associated context is disposed #[derive(Debug)] pub struct Module { pub ptr: LLVMModuleRef } impl_llvm_ref!(Module, LLVMModuleRef); impl Module { pub fn dump(&self) { unsafe { llvm::LLVMDumpModule(self.ptr) } } pub fn add_function(&mut self, func_ty: &types::Function, name: &str) -> Function { let c_name = CString::new(name).unwrap(); let p = unsafe { llvm::LLVMAddFunction(self.ptr, c_name.as_ptr(), func_ty.into()) }; Function { ptr: p } } pub fn
(&mut self, name: &str) -> Option<Function> { let c_name = CString::new(name).unwrap(); let res = unsafe { llvm::LLVMGetNamedFunction(self.ptr, c_name.as_ptr()) }; if res.is_null() { None } else { Some(Function::from_value_ref(res)) } } /// Prints a module to a file /// /// ```rust /// use llvm::Context; /// /// let context = Context::global(); /// let module = context.module_create_with_name("name"); /// let path = "./module_file"; /// /// module.print_to_file(path).unwrap(); /// /// assert!(std::path::Path::new(path).exists()); /// std::fs::remove_file(path).unwrap() /// ``` pub fn print_to_file<P: AsRef<Path>>(&self, path: P) -> Result<()> { let str_path = path.as_ref().to_str().expect("Failed to convert path to unicode"); let c_path = CString::new(str_path).unwrap(); let mut em: usize = 0; let em_ptr: *mut usize = &mut em; unsafe { llvm::LLVMPrintModuleToFile(self.ptr, c_path.as_ptr(), em_ptr as *mut *mut i8); if em == 0 { // no error message was set Ok(()) } else { Err(ErrorKind::ModulePrintFile(c_str_to_str!(em as *const i8).into()).into()) } } } } impl fmt::Display for Module { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { let c_str = llvm::LLVMPrintModuleToString(self.ptr); let len = libc::strlen(c_str); let s = String::from_raw_parts( c_str as *mut u8, len + 1, len + 1 ); write!(f, "{}", s) } } }
get_named_function
identifier_name
module.rs
use std::fmt; use libc; use llvm_sys::prelude::*; use llvm_sys::core as llvm; use super::*; use std::path::Path; error_chain! { errors { ModulePrintFile(t: String) { description("Error while printing module to file") display("Error while printing module to file: '{}'", t) } } } // No `Drop` impl is needed as this is disposed of when the associated context is disposed #[derive(Debug)] pub struct Module { pub ptr: LLVMModuleRef } impl_llvm_ref!(Module, LLVMModuleRef); impl Module { pub fn dump(&self) { unsafe { llvm::LLVMDumpModule(self.ptr) } } pub fn add_function(&mut self, func_ty: &types::Function, name: &str) -> Function { let c_name = CString::new(name).unwrap(); let p = unsafe { llvm::LLVMAddFunction(self.ptr, c_name.as_ptr(), func_ty.into()) }; Function { ptr: p } } pub fn get_named_function(&mut self, name: &str) -> Option<Function> { let c_name = CString::new(name).unwrap(); let res = unsafe { llvm::LLVMGetNamedFunction(self.ptr, c_name.as_ptr()) }; if res.is_null() { None } else { Some(Function::from_value_ref(res)) } } /// Prints a module to a file /// /// ```rust /// use llvm::Context; /// /// let context = Context::global(); /// let module = context.module_create_with_name("name"); /// let path = "./module_file"; /// /// module.print_to_file(path).unwrap(); /// /// assert!(std::path::Path::new(path).exists()); /// std::fs::remove_file(path).unwrap() /// ``` pub fn print_to_file<P: AsRef<Path>>(&self, path: P) -> Result<()> { let str_path = path.as_ref().to_str().expect("Failed to convert path to unicode"); let c_path = CString::new(str_path).unwrap(); let mut em: usize = 0; let em_ptr: *mut usize = &mut em; unsafe { llvm::LLVMPrintModuleToFile(self.ptr, c_path.as_ptr(), em_ptr as *mut *mut i8); if em == 0 { // no error message was set Ok(()) } else { Err(ErrorKind::ModulePrintFile(c_str_to_str!(em as *const i8).into()).into()) } } } } impl fmt::Display for Module { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
len + 1, len + 1 ); write!(f, "{}", s) } } }
unsafe { let c_str = llvm::LLVMPrintModuleToString(self.ptr); let len = libc::strlen(c_str); let s = String::from_raw_parts( c_str as *mut u8,
random_line_split
module.rs
use std::fmt; use libc; use llvm_sys::prelude::*; use llvm_sys::core as llvm; use super::*; use std::path::Path; error_chain! { errors { ModulePrintFile(t: String) { description("Error while printing module to file") display("Error while printing module to file: '{}'", t) } } } // No `Drop` impl is needed as this is disposed of when the associated context is disposed #[derive(Debug)] pub struct Module { pub ptr: LLVMModuleRef } impl_llvm_ref!(Module, LLVMModuleRef); impl Module { pub fn dump(&self) { unsafe { llvm::LLVMDumpModule(self.ptr) } } pub fn add_function(&mut self, func_ty: &types::Function, name: &str) -> Function
pub fn get_named_function(&mut self, name: &str) -> Option<Function> { let c_name = CString::new(name).unwrap(); let res = unsafe { llvm::LLVMGetNamedFunction(self.ptr, c_name.as_ptr()) }; if res.is_null() { None } else { Some(Function::from_value_ref(res)) } } /// Prints a module to a file /// /// ```rust /// use llvm::Context; /// /// let context = Context::global(); /// let module = context.module_create_with_name("name"); /// let path = "./module_file"; /// /// module.print_to_file(path).unwrap(); /// /// assert!(std::path::Path::new(path).exists()); /// std::fs::remove_file(path).unwrap() /// ``` pub fn print_to_file<P: AsRef<Path>>(&self, path: P) -> Result<()> { let str_path = path.as_ref().to_str().expect("Failed to convert path to unicode"); let c_path = CString::new(str_path).unwrap(); let mut em: usize = 0; let em_ptr: *mut usize = &mut em; unsafe { llvm::LLVMPrintModuleToFile(self.ptr, c_path.as_ptr(), em_ptr as *mut *mut i8); if em == 0 { // no error message was set Ok(()) } else { Err(ErrorKind::ModulePrintFile(c_str_to_str!(em as *const i8).into()).into()) } } } } impl fmt::Display for Module { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { let c_str = llvm::LLVMPrintModuleToString(self.ptr); let len = libc::strlen(c_str); let s = String::from_raw_parts( c_str as *mut u8, len + 1, len + 1 ); write!(f, "{}", s) } } }
{ let c_name = CString::new(name).unwrap(); let p = unsafe { llvm::LLVMAddFunction(self.ptr, c_name.as_ptr(), func_ty.into()) }; Function { ptr: p } }
identifier_body
module.rs
use std::fmt; use libc; use llvm_sys::prelude::*; use llvm_sys::core as llvm; use super::*; use std::path::Path; error_chain! { errors { ModulePrintFile(t: String) { description("Error while printing module to file") display("Error while printing module to file: '{}'", t) } } } // No `Drop` impl is needed as this is disposed of when the associated context is disposed #[derive(Debug)] pub struct Module { pub ptr: LLVMModuleRef } impl_llvm_ref!(Module, LLVMModuleRef); impl Module { pub fn dump(&self) { unsafe { llvm::LLVMDumpModule(self.ptr) } } pub fn add_function(&mut self, func_ty: &types::Function, name: &str) -> Function { let c_name = CString::new(name).unwrap(); let p = unsafe { llvm::LLVMAddFunction(self.ptr, c_name.as_ptr(), func_ty.into()) }; Function { ptr: p } } pub fn get_named_function(&mut self, name: &str) -> Option<Function> { let c_name = CString::new(name).unwrap(); let res = unsafe { llvm::LLVMGetNamedFunction(self.ptr, c_name.as_ptr()) }; if res.is_null() { None } else { Some(Function::from_value_ref(res)) } } /// Prints a module to a file /// /// ```rust /// use llvm::Context; /// /// let context = Context::global(); /// let module = context.module_create_with_name("name"); /// let path = "./module_file"; /// /// module.print_to_file(path).unwrap(); /// /// assert!(std::path::Path::new(path).exists()); /// std::fs::remove_file(path).unwrap() /// ``` pub fn print_to_file<P: AsRef<Path>>(&self, path: P) -> Result<()> { let str_path = path.as_ref().to_str().expect("Failed to convert path to unicode"); let c_path = CString::new(str_path).unwrap(); let mut em: usize = 0; let em_ptr: *mut usize = &mut em; unsafe { llvm::LLVMPrintModuleToFile(self.ptr, c_path.as_ptr(), em_ptr as *mut *mut i8); if em == 0
else { Err(ErrorKind::ModulePrintFile(c_str_to_str!(em as *const i8).into()).into()) } } } } impl fmt::Display for Module { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { let c_str = llvm::LLVMPrintModuleToString(self.ptr); let len = libc::strlen(c_str); let s = String::from_raw_parts( c_str as *mut u8, len + 1, len + 1 ); write!(f, "{}", s) } } }
{ // no error message was set Ok(()) }
conditional_block
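A short usage sketch for `get_named_function` above (illustrative; it reuses the `Context::global()` and `module_create_with_name` calls from the file's own doc example and assumes the same crate layout): the lookup returns `None` until a function with that name has been added with `add_function`.

```rust
use llvm::Context;

fn main() {
    let context = Context::global();
    let mut module = context.module_create_with_name("demo");

    // Nothing named "my_func" has been added to this module yet.
    assert!(module.get_named_function("my_func").is_none());
}
```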
unary.rs
//! Module implementing evaluation of unary operator AST nodes. use eval::{self, api, Eval, Context, Value}; use parse::ast::UnaryOpNode; impl Eval for UnaryOpNode { fn eval(&self, context: &mut Context) -> eval::Result { let arg = try!(self.arg.eval(context)); match &self.op[..] { "+" => UnaryOpNode::eval_plus(arg), "-" => UnaryOpNode::eval_minus(arg), "!" => UnaryOpNode::eval_bang(arg), _ => Err(eval::Error::new( &format!("unknown unary operator: `{}`", self.op) )) } } } impl UnaryOpNode { /// Evaluate the "+" operator for one value. fn eval_plus(arg: Value) -> eval::Result { eval1!(arg : Integer { arg }); eval1!(arg : Float { arg }); UnaryOpNode::err("+", &arg) } /// Evaluate the "-" operator for one value. fn
(arg: Value) -> eval::Result { eval1!(arg : Integer { -arg }); eval1!(arg : Float { -arg }); UnaryOpNode::err("-", &arg) } /// Evaluate the "!" operator for one value. #[inline] fn eval_bang(arg: Value) -> eval::Result { let arg = try!(api::conv::bool(arg)).unwrap_bool(); Ok(Value::Boolean(!arg)) } } impl UnaryOpNode { /// Produce an error about invalid argument for an operator. #[inline] fn err(op: &str, arg: &Value) -> eval::Result { Err(eval::Error::new(&format!( "invalid argument for `{}` operator: `{:?}`", op, arg ))) } }
eval_minus
identifier_name
unary.rs
//! Module implementing evaluation of unary operator AST nodes. use eval::{self, api, Eval, Context, Value}; use parse::ast::UnaryOpNode; impl Eval for UnaryOpNode { fn eval(&self, context: &mut Context) -> eval::Result { let arg = try!(self.arg.eval(context)); match &self.op[..] { "+" => UnaryOpNode::eval_plus(arg), "-" => UnaryOpNode::eval_minus(arg), "!" => UnaryOpNode::eval_bang(arg), _ => Err(eval::Error::new( &format!("unknown unary operator: `{}`", self.op) )) } } } impl UnaryOpNode { /// Evaluate the "+" operator for one value. fn eval_plus(arg: Value) -> eval::Result { eval1!(arg : Integer { arg }); eval1!(arg : Float { arg }); UnaryOpNode::err("+", &arg) } /// Evaluate the "-" operator for one value. fn eval_minus(arg: Value) -> eval::Result { eval1!(arg : Integer { -arg }); eval1!(arg : Float { -arg }); UnaryOpNode::err("-", &arg) } /// Evaluate the "!" operator for one value. #[inline] fn eval_bang(arg: Value) -> eval::Result
} impl UnaryOpNode { /// Produce an error about invalid argument for an operator. #[inline] fn err(op: &str, arg: &Value) -> eval::Result { Err(eval::Error::new(&format!( "invalid argument for `{}` operator: `{:?}`", op, arg ))) } }
{ let arg = try!(api::conv::bool(arg)).unwrap_bool(); Ok(Value::Boolean(!arg)) }
identifier_body
unary.rs
//! Module implementing evaluation of unary operator AST nodes. use eval::{self, api, Eval, Context, Value}; use parse::ast::UnaryOpNode; impl Eval for UnaryOpNode { fn eval(&self, context: &mut Context) -> eval::Result { let arg = try!(self.arg.eval(context)); match &self.op[..] { "+" => UnaryOpNode::eval_plus(arg), "-" => UnaryOpNode::eval_minus(arg), "!" => UnaryOpNode::eval_bang(arg), _ => Err(eval::Error::new( &format!("unknown unary operator: `{}`", self.op) ))
/// Evaluate the "+" operator for one value. fn eval_plus(arg: Value) -> eval::Result { eval1!(arg : Integer { arg }); eval1!(arg : Float { arg }); UnaryOpNode::err("+", &arg) } /// Evaluate the "-" operator for one value. fn eval_minus(arg: Value) -> eval::Result { eval1!(arg : Integer { -arg }); eval1!(arg : Float { -arg }); UnaryOpNode::err("-", &arg) } /// Evaluate the "!" operator for one value. #[inline] fn eval_bang(arg: Value) -> eval::Result { let arg = try!(api::conv::bool(arg)).unwrap_bool(); Ok(Value::Boolean(!arg)) } } impl UnaryOpNode { /// Produce an error about invalid argument for an operator. #[inline] fn err(op: &str, arg: &Value) -> eval::Result { Err(eval::Error::new(&format!( "invalid argument for `{}` operator: `{:?}`", op, arg ))) } }
} } } impl UnaryOpNode {
random_line_split
person.rs
use std::time; use time::Tm; use mre::model::{Model, Error}; // Create a class to act as our model. Unfortunately Rust's classes aren't // finished yet, and are missing a couple features that would help clean up // models. First, there's no way to mix in implementation of functions, so we // need to duplicate some code that's common across all models. Second, there // is no way to have multiple constructors or static functions, so we need to // move Error handling out into a wrapper function. So to keep the api clean, // we cheat and hide the class so we can make a function that acts like what we // want. pub struct Person { model: Model, } pub impl Person { fn id(&self) -> &self/~str { &self.model._id } fn timestamp(&self) -> ~str { self.model.get_str(&~"timestamp") } fn set_timestamp(&mut self, timestamp: ~str) -> bool { self.model.set_str(~"timestamp", timestamp) } fn name(&self) -> ~str { self.model.get_str(&~"name") } fn set_name(&mut self, name: ~str) -> bool { self.model.set_str(~"name", name) } fn create(&self) -> Result<(~str, uint), Error> { self.model.create() } fn save(&self) -> Result<(~str, uint), Error> { self.model.save() } fn delete(&self)
} // Create a new person model. pub fn Person(es: elasticsearch::Client, name: ~str) -> Person { // Create a person. We'll store the model in the ES index named // "helloeveryone", under the type "person". We'd like ES to make the index // for us, so we leave the id blank. let mut person = Person { model: Model(es, ~"helloeveryone", ~"person", ~"") }; person.set_name(name); person.set_timestamp(time::now().rfc3339()); person } // Return the last 50 people we have said hello to. pub fn last_50(es: elasticsearch::Client) -> ~[Person] { // This query can be a little complicated for those who have never used // elasticsearch. All it says is that we want to fetch 50 documents on the // index "helloeveryone" and the type "person", sorted by time. do mre::model::search(es) |bld| { bld .set_indices(~[~"helloeveryone"]) .set_types(~[~"person"]) .set_source(JsonObjectBuilder() .insert(~"size", 50.0) .insert_list(~"sort", |bld| { bld.push_object(|bld| { bld.insert(~"timestamp", ~"desc"); }); }) .object.take() ); }.map(|model| // Construct a person model from the raw model data. Person { model: *model } ) }
{ self.model.delete() }
identifier_body
person.rs
use std::time; use time::Tm; use mre::model::{Model, Error}; // Create a class to act as our model. Unfortunately Rust's classes aren't // finished yet, and are missing a couple features that would help clean up // models. First, there's no way to mix in implementation of functions, so we // need to duplicate some code that's common across all models. Second, there // is no way to have multiple constructors or static functions, so we need to // move Error handling out into a wrapper function. So to keep the api clean, // we cheat and hide the class so we can make a function that acts like what we // want. pub struct Person { model: Model, } pub impl Person { fn id(&self) -> &self/~str { &self.model._id } fn timestamp(&self) -> ~str { self.model.get_str(&~"timestamp") } fn set_timestamp(&mut self, timestamp: ~str) -> bool { self.model.set_str(~"timestamp", timestamp) } fn name(&self) -> ~str { self.model.get_str(&~"name") } fn set_name(&mut self, name: ~str) -> bool { self.model.set_str(~"name", name) }
fn create(&self) -> Result<(~str, uint), Error> { self.model.create() } fn save(&self) -> Result<(~str, uint), Error> { self.model.save() } fn delete(&self) { self.model.delete() } } // Create a new person model. pub fn Person(es: elasticsearch::Client, name: ~str) -> Person { // Create a person. We'll store the model in the ES index named // "helloeveryone", under the type "person". We'd like ES to make the index // for us, so we leave the id blank. let mut person = Person { model: Model(es, ~"helloeveryone", ~"person", ~"") }; person.set_name(name); person.set_timestamp(time::now().rfc3339()); person } // Return the last 50 people we have said hello to. pub fn last_50(es: elasticsearch::Client) -> ~[Person] { // This query can be a little complicated for those who have never used // elasticsearch. All it says is that we want to fetch 50 documents on the // index "helloeveryone" and the type "person", sorted by time. do mre::model::search(es) |bld| { bld .set_indices(~[~"helloeveryone"]) .set_types(~[~"person"]) .set_source(JsonObjectBuilder() .insert(~"size", 50.0) .insert_list(~"sort", |bld| { bld.push_object(|bld| { bld.insert(~"timestamp", ~"desc"); }); }) .object.take() ); }.map(|model| // Construct a person model from the raw model data. Person { model: *model } ) }
random_line_split
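Finally, the `last_50` helper in `person.rs` assembles its Elasticsearch search body with `JsonObjectBuilder`; written out, the request it builds is roughly the following (an inferred sketch based on the builder calls, not captured from a live request):

```rust
// Roughly the body last_50 sends for the "helloeveryone" index, "person" type (illustrative).
let _query_body = r#"{ "size": 50, "sort": [ { "timestamp": "desc" } ] }"#;
```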