file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
tendermint.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint params deserialization.
use uint::Uint;
use hash::Address;
use super::ValidatorSet;
/// Tendermint params deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct TendermintParams {
/// Gas limit divisor.
#[serde(rename="gasLimitBoundDivisor")]
pub gas_limit_bound_divisor: Uint,
/// Valid validators.
pub validators: ValidatorSet,
/// Propose step timeout in milliseconds.
#[serde(rename="timeoutPropose")]
pub timeout_propose: Option<Uint>,
/// Prevote step timeout in milliseconds.
#[serde(rename="timeoutPrevote")]
pub timeout_prevote: Option<Uint>,
/// Precommit step timeout in milliseconds.
#[serde(rename="timeoutPrecommit")]
pub timeout_precommit: Option<Uint>,
/// Commit step timeout in milliseconds.
#[serde(rename="timeoutCommit")]
pub timeout_commit: Option<Uint>,
/// Block reward.
#[serde(rename="blockReward")]
pub block_reward: Option<Uint>,
/// Address of the registrar contract.
pub registrar: Option<Address>,
}
/// Tendermint engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Tendermint {
/// Ethash params.
pub params: TendermintParams,
}
#[cfg(test)]
mod tests {
use serde_json;
use uint::Uint;
use util::U256;
use hash::Address;
use util::hash::H160;
use spec::tendermint::Tendermint;
use spec::validator_set::ValidatorSet;
#[test]
fn tendermint_deserialization() |
}
| {
let s = r#"{
"params": {
"gasLimitBoundDivisor": "0x0400",
"validators": {
"list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"]
},
"blockReward": "0x50"
}
}"#;
let deserialized: Tendermint = serde_json::from_str(s).unwrap();
assert_eq!(deserialized.params.gas_limit_bound_divisor, Uint(U256::from(0x0400)));
let vs = ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))]);
assert_eq!(deserialized.params.validators, vs);
assert_eq!(deserialized.params.block_reward, Some(Uint(U256::from(0x50))));
} | identifier_body |
tendermint.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint params deserialization.
use uint::Uint;
use hash::Address;
use super::ValidatorSet;
/// Tendermint params deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct TendermintParams {
/// Gas limit divisor.
#[serde(rename="gasLimitBoundDivisor")]
pub gas_limit_bound_divisor: Uint,
/// Valid validators.
pub validators: ValidatorSet,
/// Propose step timeout in milliseconds.
#[serde(rename="timeoutPropose")]
pub timeout_propose: Option<Uint>,
/// Prevote step timeout in milliseconds.
#[serde(rename="timeoutPrevote")]
pub timeout_prevote: Option<Uint>,
/// Precommit step timeout in milliseconds.
#[serde(rename="timeoutPrecommit")]
pub timeout_precommit: Option<Uint>,
/// Commit step timeout in milliseconds.
#[serde(rename="timeoutCommit")]
pub timeout_commit: Option<Uint>,
/// Block reward.
#[serde(rename="blockReward")]
pub block_reward: Option<Uint>,
/// Address of the registrar contract.
pub registrar: Option<Address>,
}
/// Tendermint engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Tendermint {
/// Ethash params.
pub params: TendermintParams,
} | mod tests {
use serde_json;
use uint::Uint;
use util::U256;
use hash::Address;
use util::hash::H160;
use spec::tendermint::Tendermint;
use spec::validator_set::ValidatorSet;
#[test]
fn tendermint_deserialization() {
let s = r#"{
"params": {
"gasLimitBoundDivisor": "0x0400",
"validators": {
"list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"]
},
"blockReward": "0x50"
}
}"#;
let deserialized: Tendermint = serde_json::from_str(s).unwrap();
assert_eq!(deserialized.params.gas_limit_bound_divisor, Uint(U256::from(0x0400)));
let vs = ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))]);
assert_eq!(deserialized.params.validators, vs);
assert_eq!(deserialized.params.block_reward, Some(Uint(U256::from(0x50))));
}
} |
#[cfg(test)] | random_line_split |
tendermint.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint params deserialization.
use uint::Uint;
use hash::Address;
use super::ValidatorSet;
/// Tendermint params deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct | {
/// Gas limit divisor.
#[serde(rename="gasLimitBoundDivisor")]
pub gas_limit_bound_divisor: Uint,
/// Valid validators.
pub validators: ValidatorSet,
/// Propose step timeout in milliseconds.
#[serde(rename="timeoutPropose")]
pub timeout_propose: Option<Uint>,
/// Prevote step timeout in milliseconds.
#[serde(rename="timeoutPrevote")]
pub timeout_prevote: Option<Uint>,
/// Precommit step timeout in milliseconds.
#[serde(rename="timeoutPrecommit")]
pub timeout_precommit: Option<Uint>,
/// Commit step timeout in milliseconds.
#[serde(rename="timeoutCommit")]
pub timeout_commit: Option<Uint>,
/// Block reward.
#[serde(rename="blockReward")]
pub block_reward: Option<Uint>,
/// Address of the registrar contract.
pub registrar: Option<Address>,
}
/// Tendermint engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Tendermint {
/// Ethash params.
pub params: TendermintParams,
}
#[cfg(test)]
mod tests {
use serde_json;
use uint::Uint;
use util::U256;
use hash::Address;
use util::hash::H160;
use spec::tendermint::Tendermint;
use spec::validator_set::ValidatorSet;
#[test]
fn tendermint_deserialization() {
let s = r#"{
"params": {
"gasLimitBoundDivisor": "0x0400",
"validators": {
"list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"]
},
"blockReward": "0x50"
}
}"#;
let deserialized: Tendermint = serde_json::from_str(s).unwrap();
assert_eq!(deserialized.params.gas_limit_bound_divisor, Uint(U256::from(0x0400)));
let vs = ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))]);
assert_eq!(deserialized.params.validators, vs);
assert_eq!(deserialized.params.block_reward, Some(Uint(U256::from(0x50))));
}
}
| TendermintParams | identifier_name |
local-modularized-tricky-pass-2.rs | // check-pass
//
// `#[macro_export] macro_rules` that doesn't originate from macro expansions can be placed
// into the root module soon enough to act as usual items and shadow globs and preludes.
#![feature(decl_macro)]
// `macro_export` shadows globs
use inner1::*;
mod inner1 {
pub macro exported() {}
}
exported!();
mod deep {
fn deep() {
type Deeper = [u8; {
#[macro_export]
macro_rules! exported {
() => ( struct Б; )
}
0
}];
}
}
// `macro_export` shadows std prelude
fn main() { |
mod inner3 {
#[macro_export]
macro_rules! panic {
() => ( struct Г; )
}
}
// `macro_export` shadows builtin macros
include!();
mod inner4 {
#[macro_export]
macro_rules! include {
() => ( struct Д; )
}
}
|
panic!();
}
| identifier_body |
local-modularized-tricky-pass-2.rs | // check-pass
//
// `#[macro_export] macro_rules` that doesn't originate from macro expansions can be placed
// into the root module soon enough to act as usual items and shadow globs and preludes.
#![feature(decl_macro)]
// `macro_export` shadows globs
use inner1::*;
mod inner1 {
pub macro exported() {}
}
exported!();
mod deep {
fn | () {
type Deeper = [u8; {
#[macro_export]
macro_rules! exported {
() => ( struct Б; )
}
0
}];
}
}
// `macro_export` shadows std prelude
fn main() {
panic!();
}
mod inner3 {
#[macro_export]
macro_rules! panic {
() => ( struct Г; )
}
}
// `macro_export` shadows builtin macros
include!();
mod inner4 {
#[macro_export]
macro_rules! include {
() => ( struct Д; )
}
}
| deep | identifier_name |
local-modularized-tricky-pass-2.rs | // check-pass
//
// `#[macro_export] macro_rules` that doesn't originate from macro expansions can be placed
// into the root module soon enough to act as usual items and shadow globs and preludes.
| // `macro_export` shadows globs
use inner1::*;
mod inner1 {
pub macro exported() {}
}
exported!();
mod deep {
fn deep() {
type Deeper = [u8; {
#[macro_export]
macro_rules! exported {
() => ( struct Б; )
}
0
}];
}
}
// `macro_export` shadows std prelude
fn main() {
panic!();
}
mod inner3 {
#[macro_export]
macro_rules! panic {
() => ( struct Г; )
}
}
// `macro_export` shadows builtin macros
include!();
mod inner4 {
#[macro_export]
macro_rules! include {
() => ( struct Д; )
}
} | #![feature(decl_macro)]
| random_line_split |
set_map_velocity.rs | use specs::{ReadStorage, System, WriteStorage};
use crate::campaign::components::MapIntent;
use crate::campaign::components::map_intent::{MapCommand, XAxis, YAxis};
use crate::combat::components::{Velocity};
pub struct SetMapVelocity;
impl<'a> System<'a> for SetMapVelocity {
type SystemData = (ReadStorage<'a, MapIntent>, WriteStorage<'a, Velocity>);
fn run(&mut self, (intent, mut velocity): Self::SystemData) {
use specs::Join;
for (intent, velocity) in (&intent, &mut velocity).join() {
match intent.command {
MapCommand::Move { x, y } => {
if x == XAxis::Centre && y == YAxis::Centre {
velocity.x = 0;
velocity.y = 0;
} else {
velocity.x = x as i32;
velocity.y = y as i32;
}
}
_ => {
velocity.x = 0;
velocity.y = 0;
}
}
} | }
} | random_line_split |
|
set_map_velocity.rs | use specs::{ReadStorage, System, WriteStorage};
use crate::campaign::components::MapIntent;
use crate::campaign::components::map_intent::{MapCommand, XAxis, YAxis};
use crate::combat::components::{Velocity};
pub struct | ;
impl<'a> System<'a> for SetMapVelocity {
type SystemData = (ReadStorage<'a, MapIntent>, WriteStorage<'a, Velocity>);
fn run(&mut self, (intent, mut velocity): Self::SystemData) {
use specs::Join;
for (intent, velocity) in (&intent, &mut velocity).join() {
match intent.command {
MapCommand::Move { x, y } => {
if x == XAxis::Centre && y == YAxis::Centre {
velocity.x = 0;
velocity.y = 0;
} else {
velocity.x = x as i32;
velocity.y = y as i32;
}
}
_ => {
velocity.x = 0;
velocity.y = 0;
}
}
}
}
}
| SetMapVelocity | identifier_name |
set_map_velocity.rs | use specs::{ReadStorage, System, WriteStorage};
use crate::campaign::components::MapIntent;
use crate::campaign::components::map_intent::{MapCommand, XAxis, YAxis};
use crate::combat::components::{Velocity};
pub struct SetMapVelocity;
impl<'a> System<'a> for SetMapVelocity {
type SystemData = (ReadStorage<'a, MapIntent>, WriteStorage<'a, Velocity>);
fn run(&mut self, (intent, mut velocity): Self::SystemData) | }
}
| {
use specs::Join;
for (intent, velocity) in (&intent, &mut velocity).join() {
match intent.command {
MapCommand::Move { x, y } => {
if x == XAxis::Centre && y == YAxis::Centre {
velocity.x = 0;
velocity.y = 0;
} else {
velocity.x = x as i32;
velocity.y = y as i32;
}
}
_ => {
velocity.x = 0;
velocity.y = 0;
}
}
} | identifier_body |
decode.rs | // Claxon -- A FLAC decoding library in Rust
// Copyright 2015 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
// This file implements a decoder, like the reference `flac -d`. It is fast, but
// being fast requires dealing with a few details of the FLAC format. There is
// also a simpler example, `decode_simple`, which is less verbose.
extern crate claxon;
extern crate hound;
use claxon::{Block, FlacReader};
use hound::{WavSpec, WavWriter};
use std::env;
use std::path::Path;
fn decode_file(fname: &Path) {
let mut reader = FlacReader::open(fname).expect("failed to open FLAC stream");
// TODO: Write fallback for other sample widths and channel numbers.
assert!(reader.streaminfo().bits_per_sample == 16);
assert!(reader.streaminfo().channels == 2);
let spec = WavSpec {
channels: reader.streaminfo().channels as u16,
sample_rate: reader.streaminfo().sample_rate,
bits_per_sample: reader.streaminfo().bits_per_sample as u16,
sample_format: hound::SampleFormat::Int,
};
let fname_wav = fname.with_extension("wav");
let mut wav_writer = WavWriter::create(fname_wav, spec).expect("failed to create wav file");
let mut frame_reader = reader.blocks();
let mut block = Block::empty();
loop {
// Read a single frame. Recycle the buffer from the previous frame to
// avoid allocations as much as possible.
match frame_reader.read_next_or_eof(block.into_buffer()) {
Ok(Some(next_block)) => block = next_block,
Ok(None) => break, // EOF.
Err(error) => panic!("{}", error),
}
let mut sample_writer = wav_writer.get_i16_writer(block.duration() * 2);
// Write the samples in the block to the wav file, channels interleaved.
for (left, right) in block.stereo_samples() {
// The `stereo_samples()` iterator does not yield more samples
// than the duration of the block, so we never write more
// samples to the writer than requested, hence using the
// unchecked functions is safe here.
unsafe {
sample_writer.write_sample_unchecked(left);
sample_writer.write_sample_unchecked(right);
}
}
sample_writer.flush().expect("failed to write samples to wav file");
}
wav_writer.finalize().expect("failed to finalize wav file");
}
fn | () {
let mut no_args = true;
for fname in env::args().skip(1) {
no_args = false;
print!("{}", fname);
decode_file(&Path::new(&fname));
println!(": done");
}
if no_args {
println!("no files to decode");
}
}
| main | identifier_name |
decode.rs | // Claxon -- A FLAC decoding library in Rust
// Copyright 2015 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
// This file implements a decoder, like the reference `flac -d`. It is fast, but
// being fast requires dealing with a few details of the FLAC format. There is
// also a simpler example, `decode_simple`, which is less verbose.
extern crate claxon;
extern crate hound;
use claxon::{Block, FlacReader};
use hound::{WavSpec, WavWriter};
use std::env;
use std::path::Path;
fn decode_file(fname: &Path) {
let mut reader = FlacReader::open(fname).expect("failed to open FLAC stream");
// TODO: Write fallback for other sample widths and channel numbers.
assert!(reader.streaminfo().bits_per_sample == 16);
assert!(reader.streaminfo().channels == 2);
let spec = WavSpec {
channels: reader.streaminfo().channels as u16,
sample_rate: reader.streaminfo().sample_rate, | let mut wav_writer = WavWriter::create(fname_wav, spec).expect("failed to create wav file");
let mut frame_reader = reader.blocks();
let mut block = Block::empty();
loop {
// Read a single frame. Recycle the buffer from the previous frame to
// avoid allocations as much as possible.
match frame_reader.read_next_or_eof(block.into_buffer()) {
Ok(Some(next_block)) => block = next_block,
Ok(None) => break, // EOF.
Err(error) => panic!("{}", error),
}
let mut sample_writer = wav_writer.get_i16_writer(block.duration() * 2);
// Write the samples in the block to the wav file, channels interleaved.
for (left, right) in block.stereo_samples() {
// The `stereo_samples()` iterator does not yield more samples
// than the duration of the block, so we never write more
// samples to the writer than requested, hence using the
// unchecked functions is safe here.
unsafe {
sample_writer.write_sample_unchecked(left);
sample_writer.write_sample_unchecked(right);
}
}
sample_writer.flush().expect("failed to write samples to wav file");
}
wav_writer.finalize().expect("failed to finalize wav file");
}
fn main() {
let mut no_args = true;
for fname in env::args().skip(1) {
no_args = false;
print!("{}", fname);
decode_file(&Path::new(&fname));
println!(": done");
}
if no_args {
println!("no files to decode");
}
} | bits_per_sample: reader.streaminfo().bits_per_sample as u16,
sample_format: hound::SampleFormat::Int,
};
let fname_wav = fname.with_extension("wav"); | random_line_split |
decode.rs | // Claxon -- A FLAC decoding library in Rust
// Copyright 2015 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
// This file implements a decoder, like the reference `flac -d`. It is fast, but
// being fast requires dealing with a few details of the FLAC format. There is
// also a simpler example, `decode_simple`, which is less verbose.
extern crate claxon;
extern crate hound;
use claxon::{Block, FlacReader};
use hound::{WavSpec, WavWriter};
use std::env;
use std::path::Path;
fn decode_file(fname: &Path) | // Read a single frame. Recycle the buffer from the previous frame to
// avoid allocations as much as possible.
match frame_reader.read_next_or_eof(block.into_buffer()) {
Ok(Some(next_block)) => block = next_block,
Ok(None) => break, // EOF.
Err(error) => panic!("{}", error),
}
let mut sample_writer = wav_writer.get_i16_writer(block.duration() * 2);
// Write the samples in the block to the wav file, channels interleaved.
for (left, right) in block.stereo_samples() {
// The `stereo_samples()` iterator does not yield more samples
// than the duration of the block, so we never write more
// samples to the writer than requested, hence using the
// unchecked functions is safe here.
unsafe {
sample_writer.write_sample_unchecked(left);
sample_writer.write_sample_unchecked(right);
}
}
sample_writer.flush().expect("failed to write samples to wav file");
}
wav_writer.finalize().expect("failed to finalize wav file");
}
fn main() {
let mut no_args = true;
for fname in env::args().skip(1) {
no_args = false;
print!("{}", fname);
decode_file(&Path::new(&fname));
println!(": done");
}
if no_args {
println!("no files to decode");
}
}
| {
let mut reader = FlacReader::open(fname).expect("failed to open FLAC stream");
// TODO: Write fallback for other sample widths and channel numbers.
assert!(reader.streaminfo().bits_per_sample == 16);
assert!(reader.streaminfo().channels == 2);
let spec = WavSpec {
channels: reader.streaminfo().channels as u16,
sample_rate: reader.streaminfo().sample_rate,
bits_per_sample: reader.streaminfo().bits_per_sample as u16,
sample_format: hound::SampleFormat::Int,
};
let fname_wav = fname.with_extension("wav");
let mut wav_writer = WavWriter::create(fname_wav, spec).expect("failed to create wav file");
let mut frame_reader = reader.blocks();
let mut block = Block::empty();
loop { | identifier_body |
dst-tuple.rs | // run-pass
#![allow(type_alias_bounds)]
#![feature(unsized_tuple_coercion)]
type Fat<T: ?Sized> = (isize, &'static str, T);
// x is a fat pointer
fn foo(x: &Fat<[isize]>) {
let y = &x.2;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0], 1);
assert_eq!(x.2[1], 2);
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
}
fn foo2<T:ToBar>(x: &Fat<[T]>) {
let y = &x.2;
let bar = Bar;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0].to_bar(), bar);
assert_eq!(x.2[1].to_bar(), bar);
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
}
fn foo3(x: &Fat<Fat<[isize]>>) {
let y = &(x.2).2;
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
assert_eq!((x.2).0, 8);
assert_eq!((x.2).1, "deep str");
assert_eq!((x.2).2.len(), 3);
assert_eq!(y[0], 1);
assert_eq!((x.2).2[1], 2);
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Bar;
trait ToBar {
fn to_bar(&self) -> Bar;
}
impl ToBar for Bar {
fn to_bar(&self) -> Bar {
*self
}
}
pub fn main() {
// With a vec of ints.
let f1 = (5, "some str", [1, 2, 3]);
foo(&f1);
let f2 = &f1;
foo(f2);
let f3: &Fat<[isize]> = f2;
foo(f3);
let f4: &Fat<[isize]> = &f1;
foo(f4);
let f5: &Fat<[isize]> = &(5, "some str", [1, 2, 3]); | foo(f5);
// With a vec of Bars.
let bar = Bar;
let f1 = (5, "some str", [bar, bar, bar]);
foo2(&f1);
let f2 = &f1;
foo2(f2);
let f3: &Fat<[Bar]> = f2;
foo2(f3);
let f4: &Fat<[Bar]> = &f1;
foo2(f4);
let f5: &Fat<[Bar]> = &(5, "some str", [bar, bar, bar]);
foo2(f5);
// Assignment.
let f5: &mut Fat<[isize]> = &mut (5, "some str", [1, 2, 3]);
f5.2[1] = 34;
assert_eq!(f5.2[0], 1);
assert_eq!(f5.2[1], 34);
assert_eq!(f5.2[2], 3);
// Zero size vec.
let f5: &Fat<[isize]> = &(5, "some str", []);
assert!(f5.2.is_empty());
let f5: &Fat<[Bar]> = &(5, "some str", []);
assert!(f5.2.is_empty());
// Deeply nested.
let f1 = (5, "some str", (8, "deep str", [1, 2, 3]));
foo3(&f1);
let f2 = &f1;
foo3(f2);
let f3: &Fat<Fat<[isize]>> = f2;
foo3(f3);
let f4: &Fat<Fat<[isize]>> = &f1;
foo3(f4);
let f5: &Fat<Fat<[isize]>> = &(5, "some str", (8, "deep str", [1, 2, 3]));
foo3(f5);
// Box.
let f1 = Box::new([1, 2, 3]);
assert_eq!((*f1)[1], 2);
let f2: Box<[isize]> = f1;
assert_eq!((*f2)[1], 2);
// Nested Box.
let f1 : Box<Fat<[isize; 3]>> = Box::new((5, "some str", [1, 2, 3]));
foo(&*f1);
let f2 : Box<Fat<[isize]>> = f1;
foo(&*f2);
let f3 : Box<Fat<[isize]>> =
Box::<Fat<[_; 3]>>::new((5, "some str", [1, 2, 3]));
foo(&*f3);
} | random_line_split |
|
dst-tuple.rs | // run-pass
#![allow(type_alias_bounds)]
#![feature(unsized_tuple_coercion)]
type Fat<T: ?Sized> = (isize, &'static str, T);
// x is a fat pointer
fn foo(x: &Fat<[isize]>) |
fn foo2<T:ToBar>(x: &Fat<[T]>) {
let y = &x.2;
let bar = Bar;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0].to_bar(), bar);
assert_eq!(x.2[1].to_bar(), bar);
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
}
fn foo3(x: &Fat<Fat<[isize]>>) {
let y = &(x.2).2;
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
assert_eq!((x.2).0, 8);
assert_eq!((x.2).1, "deep str");
assert_eq!((x.2).2.len(), 3);
assert_eq!(y[0], 1);
assert_eq!((x.2).2[1], 2);
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Bar;
trait ToBar {
fn to_bar(&self) -> Bar;
}
impl ToBar for Bar {
fn to_bar(&self) -> Bar {
*self
}
}
pub fn main() {
// With a vec of ints.
let f1 = (5, "some str", [1, 2, 3]);
foo(&f1);
let f2 = &f1;
foo(f2);
let f3: &Fat<[isize]> = f2;
foo(f3);
let f4: &Fat<[isize]> = &f1;
foo(f4);
let f5: &Fat<[isize]> = &(5, "some str", [1, 2, 3]);
foo(f5);
// With a vec of Bars.
let bar = Bar;
let f1 = (5, "some str", [bar, bar, bar]);
foo2(&f1);
let f2 = &f1;
foo2(f2);
let f3: &Fat<[Bar]> = f2;
foo2(f3);
let f4: &Fat<[Bar]> = &f1;
foo2(f4);
let f5: &Fat<[Bar]> = &(5, "some str", [bar, bar, bar]);
foo2(f5);
// Assignment.
let f5: &mut Fat<[isize]> = &mut (5, "some str", [1, 2, 3]);
f5.2[1] = 34;
assert_eq!(f5.2[0], 1);
assert_eq!(f5.2[1], 34);
assert_eq!(f5.2[2], 3);
// Zero size vec.
let f5: &Fat<[isize]> = &(5, "some str", []);
assert!(f5.2.is_empty());
let f5: &Fat<[Bar]> = &(5, "some str", []);
assert!(f5.2.is_empty());
// Deeply nested.
let f1 = (5, "some str", (8, "deep str", [1, 2, 3]));
foo3(&f1);
let f2 = &f1;
foo3(f2);
let f3: &Fat<Fat<[isize]>> = f2;
foo3(f3);
let f4: &Fat<Fat<[isize]>> = &f1;
foo3(f4);
let f5: &Fat<Fat<[isize]>> = &(5, "some str", (8, "deep str", [1, 2, 3]));
foo3(f5);
// Box.
let f1 = Box::new([1, 2, 3]);
assert_eq!((*f1)[1], 2);
let f2: Box<[isize]> = f1;
assert_eq!((*f2)[1], 2);
// Nested Box.
let f1 : Box<Fat<[isize; 3]>> = Box::new((5, "some str", [1, 2, 3]));
foo(&*f1);
let f2 : Box<Fat<[isize]>> = f1;
foo(&*f2);
let f3 : Box<Fat<[isize]>> =
Box::<Fat<[_; 3]>>::new((5, "some str", [1, 2, 3]));
foo(&*f3);
}
| {
let y = &x.2;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0], 1);
assert_eq!(x.2[1], 2);
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
} | identifier_body |
dst-tuple.rs | // run-pass
#![allow(type_alias_bounds)]
#![feature(unsized_tuple_coercion)]
type Fat<T: ?Sized> = (isize, &'static str, T);
// x is a fat pointer
fn foo(x: &Fat<[isize]>) {
let y = &x.2;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0], 1);
assert_eq!(x.2[1], 2);
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
}
fn foo2<T:ToBar>(x: &Fat<[T]>) {
let y = &x.2;
let bar = Bar;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0].to_bar(), bar);
assert_eq!(x.2[1].to_bar(), bar);
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
}
fn foo3(x: &Fat<Fat<[isize]>>) {
let y = &(x.2).2;
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
assert_eq!((x.2).0, 8);
assert_eq!((x.2).1, "deep str");
assert_eq!((x.2).2.len(), 3);
assert_eq!(y[0], 1);
assert_eq!((x.2).2[1], 2);
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct | ;
trait ToBar {
fn to_bar(&self) -> Bar;
}
impl ToBar for Bar {
fn to_bar(&self) -> Bar {
*self
}
}
pub fn main() {
// With a vec of ints.
let f1 = (5, "some str", [1, 2, 3]);
foo(&f1);
let f2 = &f1;
foo(f2);
let f3: &Fat<[isize]> = f2;
foo(f3);
let f4: &Fat<[isize]> = &f1;
foo(f4);
let f5: &Fat<[isize]> = &(5, "some str", [1, 2, 3]);
foo(f5);
// With a vec of Bars.
let bar = Bar;
let f1 = (5, "some str", [bar, bar, bar]);
foo2(&f1);
let f2 = &f1;
foo2(f2);
let f3: &Fat<[Bar]> = f2;
foo2(f3);
let f4: &Fat<[Bar]> = &f1;
foo2(f4);
let f5: &Fat<[Bar]> = &(5, "some str", [bar, bar, bar]);
foo2(f5);
// Assignment.
let f5: &mut Fat<[isize]> = &mut (5, "some str", [1, 2, 3]);
f5.2[1] = 34;
assert_eq!(f5.2[0], 1);
assert_eq!(f5.2[1], 34);
assert_eq!(f5.2[2], 3);
// Zero size vec.
let f5: &Fat<[isize]> = &(5, "some str", []);
assert!(f5.2.is_empty());
let f5: &Fat<[Bar]> = &(5, "some str", []);
assert!(f5.2.is_empty());
// Deeply nested.
let f1 = (5, "some str", (8, "deep str", [1, 2, 3]));
foo3(&f1);
let f2 = &f1;
foo3(f2);
let f3: &Fat<Fat<[isize]>> = f2;
foo3(f3);
let f4: &Fat<Fat<[isize]>> = &f1;
foo3(f4);
let f5: &Fat<Fat<[isize]>> = &(5, "some str", (8, "deep str", [1, 2, 3]));
foo3(f5);
// Box.
let f1 = Box::new([1, 2, 3]);
assert_eq!((*f1)[1], 2);
let f2: Box<[isize]> = f1;
assert_eq!((*f2)[1], 2);
// Nested Box.
let f1 : Box<Fat<[isize; 3]>> = Box::new((5, "some str", [1, 2, 3]));
foo(&*f1);
let f2 : Box<Fat<[isize]>> = f1;
foo(&*f2);
let f3 : Box<Fat<[isize]>> =
Box::<Fat<[_; 3]>>::new((5, "some str", [1, 2, 3]));
foo(&*f3);
}
| Bar | identifier_name |
rpath.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::collections::HashSet;
use std::env;
use std::path::{Path, PathBuf};
use std::fs;
use syntax::ast;
pub struct RPathConfig<'a> {
pub used_crates: Vec<(ast::CrateNum, Option<PathBuf>)>,
pub out_filename: PathBuf,
pub is_like_osx: bool,
pub has_rpath: bool,
pub get_install_prefix_lib_path: &'a mut FnMut() -> PathBuf,
}
pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec<String> {
// No rpath on windows
if !config.has_rpath {
return Vec::new();
}
let mut flags = Vec::new();
debug!("preparing the RPATH!");
let libs = config.used_crates.clone();
let libs = libs.into_iter().filter_map(|(_, l)| l).collect::<Vec<_>>();
let rpaths = get_rpaths(config, &libs[..]);
flags.push_all(&rpaths_to_flags(&rpaths[..]));
flags
}
fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
let mut ret = Vec::new();
for rpath in rpaths {
ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
}
return ret;
}
fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec<String> {
debug!("output: {:?}", config.out_filename.display());
debug!("libs:");
for libpath in libs {
debug!(" {:?}", libpath.display());
}
// Use relative paths to the libraries. Binaries can be moved
// as long as they maintain the relative relationship to the
// crates they depend on.
let rel_rpaths = get_rpaths_relative_to_output(config, libs);
// And a final backup rpath to the global library location.
let fallback_rpaths = vec!(get_install_prefix_rpath(config));
fn log_rpaths(desc: &str, rpaths: &[String]) {
debug!("{} rpaths:", desc);
for rpath in rpaths {
debug!(" {}", *rpath);
}
}
log_rpaths("relative", &rel_rpaths[..]);
log_rpaths("fallback", &fallback_rpaths[..]);
let mut rpaths = rel_rpaths;
rpaths.push_all(&fallback_rpaths[..]);
// Remove duplicates
let rpaths = minimize_rpaths(&rpaths[..]);
return rpaths;
}
fn get_rpaths_relative_to_output(config: &mut RPathConfig,
libs: &[PathBuf]) -> Vec<String> {
libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
}
fn get_rpath_relative_to_output(config: &mut RPathConfig, lib: &Path) -> String {
// Mac doesn't appear to support $ORIGIN
let prefix = if config.is_like_osx {
"@loader_path"
} else {
"$ORIGIN"
};
let cwd = env::current_dir().unwrap();
let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or(cwd.join(lib));
lib.pop();
let mut output = cwd.join(&config.out_filename);
output.pop();
let output = fs::canonicalize(&output).unwrap_or(output);
let relative = path_relative_from(&lib, &output)
.expect(&format!("couldn't create relative path from {:?} to {:?}", output, lib));
// FIXME (#9639): This needs to handle non-utf8 paths
format!("{}/{}", prefix,
relative.to_str().expect("non-utf8 component in path"))
}
// This routine is adapted from the *old* Path's `path_relative_from`
// function, which works differently from the new `relative_from` function.
// In particular, this handles the case on unix where both paths are
// absolute but with only the root as the common directory.
fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
use std::path::Component;
if path.is_absolute() != base.is_absolute() {
if path.is_absolute() {
Some(PathBuf::from(path))
} else {
None
}
} else {
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
}
fn get_install_prefix_rpath(config: &mut RPathConfig) -> String {
let path = (config.get_install_prefix_lib_path)();
let path = env::current_dir().unwrap().join(&path);
// FIXME (#9639): This needs to handle non-utf8 paths
path.to_str().expect("non-utf8 component in rpath").to_string()
}
fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
let mut set = HashSet::new();
let mut minimized = Vec::new();
for rpath in rpaths {
if set.insert(&rpath[..]) {
minimized.push(rpath.clone());
}
}
minimized
}
#[cfg(all(unix, test))]
mod tests {
use super::{RPathConfig};
use super::{minimize_rpaths, rpaths_to_flags, get_rpath_relative_to_output};
use std::path::{Path, PathBuf};
#[test]
fn test_rpaths_to_flags() {
let flags = rpaths_to_flags(&[
"path1".to_string(),
"path2".to_string()
]);
assert_eq!(flags, | fn test_minimize1() {
let res = minimize_rpaths(&[
"rpath1".to_string(),
"rpath2".to_string(),
"rpath1".to_string()
]);
assert!(res == [
"rpath1",
"rpath2",
]);
}
#[test]
fn test_minimize2() {
let res = minimize_rpaths(&[
"1a".to_string(),
"2".to_string(),
"2".to_string(),
"1a".to_string(),
"4a".to_string(),
"1a".to_string(),
"2".to_string(),
"3".to_string(),
"4a".to_string(),
"3".to_string()
]);
assert!(res == [
"1a",
"2",
"4a",
"3",
]);
}
#[test]
fn test_rpath_relative() {
if cfg!(target_os = "macos") {
let config = &mut RPathConfig {
used_crates: Vec::new(),
has_rpath: true,
is_like_osx: true,
out_filename: PathBuf::from("bin/rustc"),
get_install_prefix_lib_path: &mut || panic!(),
};
let res = get_rpath_relative_to_output(config,
Path::new("lib/libstd.so"));
assert_eq!(res, "@loader_path/../lib");
} else {
let config = &mut RPathConfig {
used_crates: Vec::new(),
out_filename: PathBuf::from("bin/rustc"),
get_install_prefix_lib_path: &mut || panic!(),
has_rpath: true,
is_like_osx: false,
};
let res = get_rpath_relative_to_output(config,
Path::new("lib/libstd.so"));
assert_eq!(res, "$ORIGIN/../lib");
}
}
} | ["-Wl,-rpath,path1",
"-Wl,-rpath,path2"]);
}
#[test] | random_line_split |
rpath.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::collections::HashSet;
use std::env;
use std::path::{Path, PathBuf};
use std::fs;
use syntax::ast;
pub struct RPathConfig<'a> {
pub used_crates: Vec<(ast::CrateNum, Option<PathBuf>)>,
pub out_filename: PathBuf,
pub is_like_osx: bool,
pub has_rpath: bool,
pub get_install_prefix_lib_path: &'a mut FnMut() -> PathBuf,
}
pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec<String> {
// No rpath on windows
if !config.has_rpath {
return Vec::new();
}
let mut flags = Vec::new();
debug!("preparing the RPATH!");
let libs = config.used_crates.clone();
let libs = libs.into_iter().filter_map(|(_, l)| l).collect::<Vec<_>>();
let rpaths = get_rpaths(config, &libs[..]);
flags.push_all(&rpaths_to_flags(&rpaths[..]));
flags
}
fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
let mut ret = Vec::new();
for rpath in rpaths {
ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
}
return ret;
}
fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec<String> {
debug!("output: {:?}", config.out_filename.display());
debug!("libs:");
for libpath in libs {
debug!(" {:?}", libpath.display());
}
// Use relative paths to the libraries. Binaries can be moved
// as long as they maintain the relative relationship to the
// crates they depend on.
let rel_rpaths = get_rpaths_relative_to_output(config, libs);
// And a final backup rpath to the global library location.
let fallback_rpaths = vec!(get_install_prefix_rpath(config));
fn log_rpaths(desc: &str, rpaths: &[String]) {
debug!("{} rpaths:", desc);
for rpath in rpaths {
debug!(" {}", *rpath);
}
}
log_rpaths("relative", &rel_rpaths[..]);
log_rpaths("fallback", &fallback_rpaths[..]);
let mut rpaths = rel_rpaths;
rpaths.push_all(&fallback_rpaths[..]);
// Remove duplicates
let rpaths = minimize_rpaths(&rpaths[..]);
return rpaths;
}
fn get_rpaths_relative_to_output(config: &mut RPathConfig,
libs: &[PathBuf]) -> Vec<String> {
libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
}
fn | (config: &mut RPathConfig, lib: &Path) -> String {
// Mac doesn't appear to support $ORIGIN
let prefix = if config.is_like_osx {
"@loader_path"
} else {
"$ORIGIN"
};
let cwd = env::current_dir().unwrap();
let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or(cwd.join(lib));
lib.pop();
let mut output = cwd.join(&config.out_filename);
output.pop();
let output = fs::canonicalize(&output).unwrap_or(output);
let relative = path_relative_from(&lib, &output)
.expect(&format!("couldn't create relative path from {:?} to {:?}", output, lib));
// FIXME (#9639): This needs to handle non-utf8 paths
format!("{}/{}", prefix,
relative.to_str().expect("non-utf8 component in path"))
}
// This routine is adapted from the *old* Path's `path_relative_from`
// function, which works differently from the new `relative_from` function.
// In particular, this handles the case on unix where both paths are
// absolute but with only the root as the common directory.
fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
use std::path::Component;
if path.is_absolute() != base.is_absolute() {
if path.is_absolute() {
Some(PathBuf::from(path))
} else {
None
}
} else {
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
}
fn get_install_prefix_rpath(config: &mut RPathConfig) -> String {
let path = (config.get_install_prefix_lib_path)();
let path = env::current_dir().unwrap().join(&path);
// FIXME (#9639): This needs to handle non-utf8 paths
path.to_str().expect("non-utf8 component in rpath").to_string()
}
fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
let mut set = HashSet::new();
let mut minimized = Vec::new();
for rpath in rpaths {
if set.insert(&rpath[..]) {
minimized.push(rpath.clone());
}
}
minimized
}
#[cfg(all(unix, test))]
mod tests {
use super::{RPathConfig};
use super::{minimize_rpaths, rpaths_to_flags, get_rpath_relative_to_output};
use std::path::{Path, PathBuf};
#[test]
fn test_rpaths_to_flags() {
let flags = rpaths_to_flags(&[
"path1".to_string(),
"path2".to_string()
]);
assert_eq!(flags,
["-Wl,-rpath,path1",
"-Wl,-rpath,path2"]);
}
#[test]
fn test_minimize1() {
let res = minimize_rpaths(&[
"rpath1".to_string(),
"rpath2".to_string(),
"rpath1".to_string()
]);
assert!(res == [
"rpath1",
"rpath2",
]);
}
#[test]
fn test_minimize2() {
let res = minimize_rpaths(&[
"1a".to_string(),
"2".to_string(),
"2".to_string(),
"1a".to_string(),
"4a".to_string(),
"1a".to_string(),
"2".to_string(),
"3".to_string(),
"4a".to_string(),
"3".to_string()
]);
assert!(res == [
"1a",
"2",
"4a",
"3",
]);
}
#[test]
fn test_rpath_relative() {
if cfg!(target_os = "macos") {
let config = &mut RPathConfig {
used_crates: Vec::new(),
has_rpath: true,
is_like_osx: true,
out_filename: PathBuf::from("bin/rustc"),
get_install_prefix_lib_path: &mut || panic!(),
};
let res = get_rpath_relative_to_output(config,
Path::new("lib/libstd.so"));
assert_eq!(res, "@loader_path/../lib");
} else {
let config = &mut RPathConfig {
used_crates: Vec::new(),
out_filename: PathBuf::from("bin/rustc"),
get_install_prefix_lib_path: &mut || panic!(),
has_rpath: true,
is_like_osx: false,
};
let res = get_rpath_relative_to_output(config,
Path::new("lib/libstd.so"));
assert_eq!(res, "$ORIGIN/../lib");
}
}
}
| get_rpath_relative_to_output | identifier_name |
rpath.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::collections::HashSet;
use std::env;
use std::path::{Path, PathBuf};
use std::fs;
use syntax::ast;
pub struct RPathConfig<'a> {
pub used_crates: Vec<(ast::CrateNum, Option<PathBuf>)>,
pub out_filename: PathBuf,
pub is_like_osx: bool,
pub has_rpath: bool,
pub get_install_prefix_lib_path: &'a mut FnMut() -> PathBuf,
}
pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec<String> {
// No rpath on windows
if !config.has_rpath {
return Vec::new();
}
let mut flags = Vec::new();
debug!("preparing the RPATH!");
let libs = config.used_crates.clone();
let libs = libs.into_iter().filter_map(|(_, l)| l).collect::<Vec<_>>();
let rpaths = get_rpaths(config, &libs[..]);
flags.push_all(&rpaths_to_flags(&rpaths[..]));
flags
}
fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
let mut ret = Vec::new();
for rpath in rpaths {
ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
}
return ret;
}
fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec<String> {
debug!("output: {:?}", config.out_filename.display());
debug!("libs:");
for libpath in libs {
debug!(" {:?}", libpath.display());
}
// Use relative paths to the libraries. Binaries can be moved
// as long as they maintain the relative relationship to the
// crates they depend on.
let rel_rpaths = get_rpaths_relative_to_output(config, libs);
// And a final backup rpath to the global library location.
let fallback_rpaths = vec!(get_install_prefix_rpath(config));
fn log_rpaths(desc: &str, rpaths: &[String]) {
debug!("{} rpaths:", desc);
for rpath in rpaths {
debug!(" {}", *rpath);
}
}
log_rpaths("relative", &rel_rpaths[..]);
log_rpaths("fallback", &fallback_rpaths[..]);
let mut rpaths = rel_rpaths;
rpaths.push_all(&fallback_rpaths[..]);
// Remove duplicates
let rpaths = minimize_rpaths(&rpaths[..]);
return rpaths;
}
fn get_rpaths_relative_to_output(config: &mut RPathConfig,
libs: &[PathBuf]) -> Vec<String> |
fn get_rpath_relative_to_output(config: &mut RPathConfig, lib: &Path) -> String {
// Mac doesn't appear to support $ORIGIN
let prefix = if config.is_like_osx {
"@loader_path"
} else {
"$ORIGIN"
};
let cwd = env::current_dir().unwrap();
let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or(cwd.join(lib));
lib.pop();
let mut output = cwd.join(&config.out_filename);
output.pop();
let output = fs::canonicalize(&output).unwrap_or(output);
let relative = path_relative_from(&lib, &output)
.expect(&format!("couldn't create relative path from {:?} to {:?}", output, lib));
// FIXME (#9639): This needs to handle non-utf8 paths
format!("{}/{}", prefix,
relative.to_str().expect("non-utf8 component in path"))
}
// This routine is adapted from the *old* Path's `path_relative_from`
// function, which works differently from the new `relative_from` function.
// In particular, this handles the case on unix where both paths are
// absolute but with only the root as the common directory.
fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
use std::path::Component;
if path.is_absolute() != base.is_absolute() {
if path.is_absolute() {
Some(PathBuf::from(path))
} else {
None
}
} else {
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
}
fn get_install_prefix_rpath(config: &mut RPathConfig) -> String {
let path = (config.get_install_prefix_lib_path)();
let path = env::current_dir().unwrap().join(&path);
// FIXME (#9639): This needs to handle non-utf8 paths
path.to_str().expect("non-utf8 component in rpath").to_string()
}
fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
let mut set = HashSet::new();
let mut minimized = Vec::new();
for rpath in rpaths {
if set.insert(&rpath[..]) {
minimized.push(rpath.clone());
}
}
minimized
}
#[cfg(all(unix, test))]
mod tests {
use super::{RPathConfig};
use super::{minimize_rpaths, rpaths_to_flags, get_rpath_relative_to_output};
use std::path::{Path, PathBuf};
#[test]
fn test_rpaths_to_flags() {
let flags = rpaths_to_flags(&[
"path1".to_string(),
"path2".to_string()
]);
assert_eq!(flags,
["-Wl,-rpath,path1",
"-Wl,-rpath,path2"]);
}
#[test]
fn test_minimize1() {
let res = minimize_rpaths(&[
"rpath1".to_string(),
"rpath2".to_string(),
"rpath1".to_string()
]);
assert!(res == [
"rpath1",
"rpath2",
]);
}
#[test]
fn test_minimize2() {
let res = minimize_rpaths(&[
"1a".to_string(),
"2".to_string(),
"2".to_string(),
"1a".to_string(),
"4a".to_string(),
"1a".to_string(),
"2".to_string(),
"3".to_string(),
"4a".to_string(),
"3".to_string()
]);
assert!(res == [
"1a",
"2",
"4a",
"3",
]);
}
#[test]
fn test_rpath_relative() {
if cfg!(target_os = "macos") {
let config = &mut RPathConfig {
used_crates: Vec::new(),
has_rpath: true,
is_like_osx: true,
out_filename: PathBuf::from("bin/rustc"),
get_install_prefix_lib_path: &mut || panic!(),
};
let res = get_rpath_relative_to_output(config,
Path::new("lib/libstd.so"));
assert_eq!(res, "@loader_path/../lib");
} else {
let config = &mut RPathConfig {
used_crates: Vec::new(),
out_filename: PathBuf::from("bin/rustc"),
get_install_prefix_lib_path: &mut || panic!(),
has_rpath: true,
is_like_osx: false,
};
let res = get_rpath_relative_to_output(config,
Path::new("lib/libstd.so"));
assert_eq!(res, "$ORIGIN/../lib");
}
}
}
| {
libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
} | identifier_body |
lib.rs | use std::fmt::{Display, Error, Formatter};
pub struct Game {
intended_word: String,
guessed: Vec<CharState>,
}
impl Game {
pub fn new(intended_word: String) -> Game {
let guessed = intended_word.chars().map(|ch| CharState::new(ch, false)).collect();
Game {
intended_word: intended_word,
guessed : guessed,
}
}
pub fn intended_word<'r>(&'r self) -> &'r str {
&self.intended_word[]
}
pub fn current_progress(&self) -> CurrentProgress {
CurrentProgress { vec : self.guessed.iter().map(|x| x.to_enum()).collect() }
}
pub fn letters_left(&self) -> usize {
self.guessed.iter().filter(|&x| !x.is_guessed).count()
}
pub fn guess(&mut self, guess : &str) {
for ch in guess.chars() {
self.guess_char(ch);
}
}
fn guess_char(&mut self, ch : char) {
for g_res in self.guessed.iter_mut().filter(|x| x.ch==ch) {
g_res.is_guessed = true;
}
}
}
struct CharState {
ch : char,
is_guessed : bool,
}
impl CharState {
fn new(ch : char, is_guessed : bool) -> CharState {
CharState {
ch : ch,
is_guessed : is_guessed,
}
}
fn to_enum(&self) -> Guessed {
if self.is_guessed {
Some(self.ch)
} else {
None | }
pub type Guessed = Option<char>;
#[derive(Debug)]
pub struct CurrentProgress {
vec : Vec<Guessed>,
}
impl Display for CurrentProgress {
fn fmt(&self, f : &mut Formatter) -> Result<(), Error> {
let mut result = String::with_capacity(self.vec.len() * 2);
for guess in self.vec.iter() {
match *guess {
Some(ch) => result.push(ch),
None => result.push('_'),
}
result.push(' ');
}
write!(f, "{}", result.as_slice())
}
} | }
} | random_line_split |
lib.rs | use std::fmt::{Display, Error, Formatter};
pub struct Game {
intended_word: String,
guessed: Vec<CharState>,
}
impl Game {
pub fn new(intended_word: String) -> Game {
let guessed = intended_word.chars().map(|ch| CharState::new(ch, false)).collect();
Game {
intended_word: intended_word,
guessed : guessed,
}
}
pub fn intended_word<'r>(&'r self) -> &'r str {
&self.intended_word[]
}
pub fn current_progress(&self) -> CurrentProgress {
CurrentProgress { vec : self.guessed.iter().map(|x| x.to_enum()).collect() }
}
pub fn letters_left(&self) -> usize {
self.guessed.iter().filter(|&x| !x.is_guessed).count()
}
pub fn guess(&mut self, guess : &str) {
for ch in guess.chars() {
self.guess_char(ch);
}
}
fn guess_char(&mut self, ch : char) {
for g_res in self.guessed.iter_mut().filter(|x| x.ch==ch) {
g_res.is_guessed = true;
}
}
}
struct | {
ch : char,
is_guessed : bool,
}
impl CharState {
fn new(ch : char, is_guessed : bool) -> CharState {
CharState {
ch : ch,
is_guessed : is_guessed,
}
}
fn to_enum(&self) -> Guessed {
if self.is_guessed {
Some(self.ch)
} else {
None
}
}
}
pub type Guessed = Option<char>;
#[derive(Debug)]
pub struct CurrentProgress {
vec : Vec<Guessed>,
}
impl Display for CurrentProgress {
fn fmt(&self, f : &mut Formatter) -> Result<(), Error> {
let mut result = String::with_capacity(self.vec.len() * 2);
for guess in self.vec.iter() {
match *guess {
Some(ch) => result.push(ch),
None => result.push('_'),
}
result.push(' ');
}
write!(f, "{}", result.as_slice())
}
}
| CharState | identifier_name |
lib.rs | use std::fmt::{Display, Error, Formatter};
pub struct Game {
intended_word: String,
guessed: Vec<CharState>,
}
impl Game {
pub fn new(intended_word: String) -> Game {
let guessed = intended_word.chars().map(|ch| CharState::new(ch, false)).collect();
Game {
intended_word: intended_word,
guessed : guessed,
}
}
pub fn intended_word<'r>(&'r self) -> &'r str {
&self.intended_word[]
}
pub fn current_progress(&self) -> CurrentProgress {
CurrentProgress { vec : self.guessed.iter().map(|x| x.to_enum()).collect() }
}
pub fn letters_left(&self) -> usize {
self.guessed.iter().filter(|&x| !x.is_guessed).count()
}
pub fn guess(&mut self, guess : &str) {
for ch in guess.chars() {
self.guess_char(ch);
}
}
fn guess_char(&mut self, ch : char) {
for g_res in self.guessed.iter_mut().filter(|x| x.ch==ch) {
g_res.is_guessed = true;
}
}
}
struct CharState {
ch : char,
is_guessed : bool,
}
impl CharState {
fn new(ch : char, is_guessed : bool) -> CharState |
fn to_enum(&self) -> Guessed {
if self.is_guessed {
Some(self.ch)
} else {
None
}
}
}
pub type Guessed = Option<char>;
#[derive(Debug)]
pub struct CurrentProgress {
vec : Vec<Guessed>,
}
impl Display for CurrentProgress {
fn fmt(&self, f : &mut Formatter) -> Result<(), Error> {
let mut result = String::with_capacity(self.vec.len() * 2);
for guess in self.vec.iter() {
match *guess {
Some(ch) => result.push(ch),
None => result.push('_'),
}
result.push(' ');
}
write!(f, "{}", result.as_slice())
}
}
| {
CharState {
ch : ch,
is_guessed : is_guessed,
}
} | identifier_body |
read_message.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::{Poll, Future, Async};
use tokio_io::AsyncRead;
use ethkey::KeyPair;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload};
/// Create future for read single message from the stream.
pub fn read_message<A>(a: A) -> ReadMessage<A> where A: AsyncRead {
ReadMessage {
key: None,
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
/// Create future for read single encrypted message from the stream.
pub fn read_encrypted_message<A>(a: A, key: KeyPair) -> ReadMessage<A> where A: AsyncRead {
ReadMessage {
key: Some(key),
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
enum | <A> {
ReadHeader(ReadHeader<A>),
ReadPayload(ReadPayload<A>),
Finished,
}
/// Future for read single message from the stream.
pub struct ReadMessage<A> {
key: Option<KeyPair>,
state: ReadMessageState<A>,
}
impl<A> Future for ReadMessage<A> where A: AsyncRead {
type Item = (A, Result<Message, Error>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (next, result) = match self.state {
ReadMessageState::ReadHeader(ref mut future) => {
let (read, header) = try_ready!(future.poll());
let header = match header {
Ok(header) => header,
Err(err) => return Ok((read, Err(err)).into()),
};
let future = match self.key.take() {
Some(key) => read_encrypted_payload(read, header, key),
None => read_payload(read, header),
};
let next = ReadMessageState::ReadPayload(future);
(next, Async::NotReady)
},
ReadMessageState::ReadPayload(ref mut future) => {
let (read, payload) = try_ready!(future.poll());
(ReadMessageState::Finished, Async::Ready((read, payload)))
},
ReadMessageState::Finished => panic!("poll ReadMessage after it's done"),
};
self.state = next;
match result {
// by polling again, we register new future
Async::NotReady => self.poll(),
result => Ok(result)
}
}
}
| ReadMessageState | identifier_name |
read_message.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::{Poll, Future, Async};
use tokio_io::AsyncRead;
use ethkey::KeyPair;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload};
/// Create future for read single message from the stream.
pub fn read_message<A>(a: A) -> ReadMessage<A> where A: AsyncRead {
ReadMessage {
key: None,
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
/// Create future for read single encrypted message from the stream.
pub fn read_encrypted_message<A>(a: A, key: KeyPair) -> ReadMessage<A> where A: AsyncRead {
ReadMessage {
key: Some(key),
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
enum ReadMessageState<A> {
ReadHeader(ReadHeader<A>),
ReadPayload(ReadPayload<A>),
Finished,
}
/// Future for read single message from the stream.
pub struct ReadMessage<A> {
key: Option<KeyPair>,
state: ReadMessageState<A>,
}
impl<A> Future for ReadMessage<A> where A: AsyncRead {
type Item = (A, Result<Message, Error>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (next, result) = match self.state {
ReadMessageState::ReadHeader(ref mut future) => {
let (read, header) = try_ready!(future.poll());
let header = match header {
Ok(header) => header,
Err(err) => return Ok((read, Err(err)).into()),
};
let future = match self.key.take() {
Some(key) => read_encrypted_payload(read, header, key),
None => read_payload(read, header),
};
let next = ReadMessageState::ReadPayload(future);
(next, Async::NotReady)
},
ReadMessageState::ReadPayload(ref mut future) => {
let (read, payload) = try_ready!(future.poll());
(ReadMessageState::Finished, Async::Ready((read, payload)))
},
ReadMessageState::Finished => panic!("poll ReadMessage after it's done"),
}; | // by polling again, we register new future
Async::NotReady => self.poll(),
result => Ok(result)
}
}
} |
self.state = next;
match result { | random_line_split |
read_message.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::{Poll, Future, Async};
use tokio_io::AsyncRead;
use ethkey::KeyPair;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload};
/// Create future for read single message from the stream.
pub fn read_message<A>(a: A) -> ReadMessage<A> where A: AsyncRead {
ReadMessage {
key: None,
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
/// Create future for read single encrypted message from the stream.
pub fn read_encrypted_message<A>(a: A, key: KeyPair) -> ReadMessage<A> where A: AsyncRead {
ReadMessage {
key: Some(key),
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
enum ReadMessageState<A> {
ReadHeader(ReadHeader<A>),
ReadPayload(ReadPayload<A>),
Finished,
}
/// Future for read single message from the stream.
pub struct ReadMessage<A> {
key: Option<KeyPair>,
state: ReadMessageState<A>,
}
impl<A> Future for ReadMessage<A> where A: AsyncRead {
type Item = (A, Result<Message, Error>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (next, result) = match self.state {
ReadMessageState::ReadHeader(ref mut future) => {
let (read, header) = try_ready!(future.poll());
let header = match header {
Ok(header) => header,
Err(err) => return Ok((read, Err(err)).into()),
};
let future = match self.key.take() {
Some(key) => read_encrypted_payload(read, header, key),
None => read_payload(read, header),
};
let next = ReadMessageState::ReadPayload(future);
(next, Async::NotReady)
},
ReadMessageState::ReadPayload(ref mut future) => | ,
ReadMessageState::Finished => panic!("poll ReadMessage after it's done"),
};
self.state = next;
match result {
// by polling again, we register new future
Async::NotReady => self.poll(),
result => Ok(result)
}
}
}
| {
let (read, payload) = try_ready!(future.poll());
(ReadMessageState::Finished, Async::Ready((read, payload)))
} | conditional_block |
mod.rs | //! First set construction and computation.
use collections::{map, Map};
use grammar::repr::*;
use lr1::lookahead::{Token, TokenSet};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct FirstSets {
map: Map<NonterminalString, TokenSet>,
}
impl FirstSets {
pub fn new(grammar: &Grammar) -> FirstSets {
let mut this = FirstSets { map: map() };
let mut changed = true;
while changed {
changed = false;
for production in grammar.nonterminals.values().flat_map(|p| &p.productions) {
let nt = &production.nonterminal;
let lookahead = this.first0(&production.symbols);
let first_set = this
.map
.entry(nt.clone())
.or_insert_with(|| TokenSet::new());
changed |= first_set.union_with(&lookahead);
}
}
this
}
/// Returns `FIRST(...symbols)`. If `...symbols` may derive
/// epsilon, then this returned set will include EOF. (This is
/// kind of repurposing EOF to serve as a binary flag of sorts.)
pub fn first0<'s, I>(&self, symbols: I) -> TokenSet
where
I: IntoIterator<Item = &'s Symbol>,
{
let mut result = TokenSet::new();
for symbol in symbols {
match *symbol {
Symbol::Terminal(ref t) => {
result.insert(Token::Terminal(t.clone()));
return result;
}
Symbol::Nonterminal(ref nt) => {
let mut empty_prod = false;
match self.map.get(nt) {
None => {
// This should only happen during set
// construction; it corresponds to an
// entry that has not yet been
// built. Otherwise, it would mean a
// terminal with no productions. Either
// way, the resulting first set should be
// empty.
}
Some(set) => {
for lookahead in set.iter() {
match lookahead {
Token::EOF => {
empty_prod = true;
}
Token::Error | Token::Terminal(_) => {
result.insert(lookahead);
}
}
}
}
}
if !empty_prod {
return result;
}
}
}
}
// control only reaches here if either symbols is empty, or it
// consists of nonterminals all of which may derive epsilon
result.insert(Token::EOF);
result
}
pub fn first1(&self, symbols: &[Symbol], lookahead: &TokenSet) -> TokenSet {
let mut set = self.first0(symbols);
// we use EOF as the signal that `symbols` derives epsilon:
let epsilon = set.take_eof();
if epsilon |
set
}
}
| {
set.union_with(&lookahead);
} | conditional_block |
mod.rs | //! First set construction and computation.
use collections::{map, Map};
use grammar::repr::*;
use lr1::lookahead::{Token, TokenSet};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct FirstSets {
map: Map<NonterminalString, TokenSet>,
}
impl FirstSets {
pub fn new(grammar: &Grammar) -> FirstSets |
/// Returns `FIRST(...symbols)`. If `...symbols` may derive
/// epsilon, then this returned set will include EOF. (This is
/// kind of repurposing EOF to serve as a binary flag of sorts.)
pub fn first0<'s, I>(&self, symbols: I) -> TokenSet
where
I: IntoIterator<Item = &'s Symbol>,
{
let mut result = TokenSet::new();
for symbol in symbols {
match *symbol {
Symbol::Terminal(ref t) => {
result.insert(Token::Terminal(t.clone()));
return result;
}
Symbol::Nonterminal(ref nt) => {
let mut empty_prod = false;
match self.map.get(nt) {
None => {
// This should only happen during set
// construction; it corresponds to an
// entry that has not yet been
// built. Otherwise, it would mean a
// terminal with no productions. Either
// way, the resulting first set should be
// empty.
}
Some(set) => {
for lookahead in set.iter() {
match lookahead {
Token::EOF => {
empty_prod = true;
}
Token::Error | Token::Terminal(_) => {
result.insert(lookahead);
}
}
}
}
}
if !empty_prod {
return result;
}
}
}
}
// control only reaches here if either symbols is empty, or it
// consists of nonterminals all of which may derive epsilon
result.insert(Token::EOF);
result
}
pub fn first1(&self, symbols: &[Symbol], lookahead: &TokenSet) -> TokenSet {
let mut set = self.first0(symbols);
// we use EOF as the signal that `symbols` derives epsilon:
let epsilon = set.take_eof();
if epsilon {
set.union_with(&lookahead);
}
set
}
}
| {
let mut this = FirstSets { map: map() };
let mut changed = true;
while changed {
changed = false;
for production in grammar.nonterminals.values().flat_map(|p| &p.productions) {
let nt = &production.nonterminal;
let lookahead = this.first0(&production.symbols);
let first_set = this
.map
.entry(nt.clone())
.or_insert_with(|| TokenSet::new());
changed |= first_set.union_with(&lookahead);
}
}
this
} | identifier_body |
mod.rs | //! First set construction and computation.
use collections::{map, Map};
use grammar::repr::*;
use lr1::lookahead::{Token, TokenSet};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct FirstSets {
map: Map<NonterminalString, TokenSet>,
}
impl FirstSets {
pub fn new(grammar: &Grammar) -> FirstSets {
let mut this = FirstSets { map: map() };
let mut changed = true;
while changed {
changed = false;
for production in grammar.nonterminals.values().flat_map(|p| &p.productions) {
let nt = &production.nonterminal;
let lookahead = this.first0(&production.symbols);
let first_set = this
.map
.entry(nt.clone())
.or_insert_with(|| TokenSet::new());
changed |= first_set.union_with(&lookahead);
}
}
this
}
/// Returns `FIRST(...symbols)`. If `...symbols` may derive
/// epsilon, then this returned set will include EOF. (This is
/// kind of repurposing EOF to serve as a binary flag of sorts.)
pub fn first0<'s, I>(&self, symbols: I) -> TokenSet
where
I: IntoIterator<Item = &'s Symbol>,
{
let mut result = TokenSet::new();
for symbol in symbols {
match *symbol {
Symbol::Terminal(ref t) => {
result.insert(Token::Terminal(t.clone()));
return result;
}
Symbol::Nonterminal(ref nt) => {
let mut empty_prod = false;
match self.map.get(nt) {
None => {
// This should only happen during set
// construction; it corresponds to an
// entry that has not yet been
// built. Otherwise, it would mean a
// terminal with no productions. Either
// way, the resulting first set should be
// empty.
}
Some(set) => {
for lookahead in set.iter() {
match lookahead {
Token::EOF => {
empty_prod = true;
}
Token::Error | Token::Terminal(_) => {
result.insert(lookahead);
}
}
}
}
}
if !empty_prod {
return result;
}
}
}
}
// control only reaches here if either symbols is empty, or it
// consists of nonterminals all of which may derive epsilon
result.insert(Token::EOF);
result
}
pub fn | (&self, symbols: &[Symbol], lookahead: &TokenSet) -> TokenSet {
let mut set = self.first0(symbols);
// we use EOF as the signal that `symbols` derives epsilon:
let epsilon = set.take_eof();
if epsilon {
set.union_with(&lookahead);
}
set
}
}
| first1 | identifier_name |
mod.rs | //! First set construction and computation.
use collections::{map, Map};
use grammar::repr::*;
use lr1::lookahead::{Token, TokenSet};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct FirstSets {
map: Map<NonterminalString, TokenSet>,
}
impl FirstSets {
pub fn new(grammar: &Grammar) -> FirstSets {
let mut this = FirstSets { map: map() };
let mut changed = true;
while changed {
changed = false;
for production in grammar.nonterminals.values().flat_map(|p| &p.productions) {
let nt = &production.nonterminal;
let lookahead = this.first0(&production.symbols);
let first_set = this
.map
.entry(nt.clone())
.or_insert_with(|| TokenSet::new());
changed |= first_set.union_with(&lookahead);
}
}
this
}
/// Returns `FIRST(...symbols)`. If `...symbols` may derive
/// epsilon, then this returned set will include EOF. (This is
/// kind of repurposing EOF to serve as a binary flag of sorts.)
pub fn first0<'s, I>(&self, symbols: I) -> TokenSet
where
I: IntoIterator<Item = &'s Symbol>,
{
let mut result = TokenSet::new();
for symbol in symbols {
match *symbol {
Symbol::Terminal(ref t) => {
result.insert(Token::Terminal(t.clone()));
return result;
}
Symbol::Nonterminal(ref nt) => {
let mut empty_prod = false;
match self.map.get(nt) {
None => {
// This should only happen during set
// construction; it corresponds to an
// entry that has not yet been
// built. Otherwise, it would mean a
// terminal with no productions. Either
// way, the resulting first set should be
// empty.
}
Some(set) => {
for lookahead in set.iter() {
match lookahead {
Token::EOF => {
empty_prod = true;
}
Token::Error | Token::Terminal(_) => {
result.insert(lookahead);
}
}
}
}
}
if !empty_prod {
return result;
}
}
}
}
// control only reaches here if either symbols is empty, or it
// consists of nonterminals all of which may derive epsilon
result.insert(Token::EOF);
result
}
pub fn first1(&self, symbols: &[Symbol], lookahead: &TokenSet) -> TokenSet {
let mut set = self.first0(symbols);
// we use EOF as the signal that `symbols` derives epsilon:
let epsilon = set.take_eof();
if epsilon {
set.union_with(&lookahead);
}
set
} | } | random_line_split |
|
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn | () {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
}
| schema_from_string_generates_errors | identifier_name |
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn schema_from_string_generates_errors() | assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
}
| {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors { | identifier_body |
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn schema_from_string_generates_errors() {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd |
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
}
| {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
} | conditional_block |
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn schema_from_string_generates_errors() {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema"); | if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
} | }
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 { | random_line_split |
error.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use ty::{self, BoundRegion, Region, Ty, TyCtxt};
use std::borrow::Cow;
use std::fmt;
use rustc_target::spec::abi;
use syntax::ast;
use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::Span;
use hir;
#[derive(Clone, Copy, Debug)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
}
// Data structures used in type unification
#[derive(Clone, Debug)]
pub enum TypeError<'tcx> {
Mismatch,
UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
AbiMismatch(ExpectedFound<abi::Abi>),
Mutability,
TupleSize(ExpectedFound<usize>),
FixedArraySize(ExpectedFound<u64>),
ArgCount,
RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
Sorts(ExpectedFound<Ty<'tcx>>),
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
Traits(ExpectedFound<DefId>),
VariadicMismatch(ExpectedFound<bool>),
/// Instantiating a type variable with the given type would have
/// created a cycle (because it appears somewhere within that
/// type).
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
pub enum UnconstrainedNumeric {
UnconstrainedFloat,
UnconstrainedInt,
Neither,
}
/// Explains the source of a type err in a short, human readable way. This is meant to be placed
/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
impl<'tcx> fmt::Display for TypeError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::TypeError::*;
fn report_maybe_different(f: &mut fmt::Formatter<'_>,
expected: &str, found: &str) -> fmt::Result {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
if expected == found {
write!(f, "expected {}, found a different {}", expected, found)
} else {
write!(f, "expected {}, found {}", expected, found)
}
}
match *self {
CyclicTy(_) => write!(f, "cyclic type of infinite size"),
Mismatch => write!(f, "types differ"),
UnsafetyMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
AbiMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
Mutability => write!(f, "types differ in mutability"),
FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.expected,
values.found)
}
TupleSize(values) => {
write!(f, "expected a tuple with {} elements, \
found one with {} elements",
values.expected,
values.found)
}
ArgCount => {
write!(f, "incorrect number of function parameters")
}
RegionsDoesNotOutlive(..) => |
RegionsInsufficientlyPolymorphic(br, _) => {
write!(f,
"expected bound lifetime parameter{}{}, found concrete lifetime",
if br.is_named() { " " } else { "" },
br)
}
RegionsOverlyPolymorphic(br, _) => {
write!(f,
"expected concrete lifetime, found bound lifetime parameter{}{}",
if br.is_named() { " " } else { "" },
br)
}
Sorts(values) => ty::tls::with(|tcx| {
report_maybe_different(f, &values.expected.sort_string(tcx),
&values.found.sort_string(tcx))
}),
Traits(values) => ty::tls::with(|tcx| {
report_maybe_different(f,
&format!("trait `{}`",
tcx.item_path_str(values.expected)),
&format!("trait `{}`",
tcx.item_path_str(values.found)))
}),
IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
FloatMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
VariadicMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
write!(f, "expected {}, found {}",
tcx.item_path_str(values.expected),
tcx.item_path_str(values.found))
}),
ProjectionBoundsLength(ref values) => {
write!(f, "expected {} associated type bindings, found {}",
values.expected,
values.found)
},
ExistentialMismatch(ref values) => {
report_maybe_different(f, &format!("trait `{}`", values.expected),
&format!("trait `{}`", values.found))
}
}
}
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> Cow<'static, str> {
match self.sty {
ty::Bool | ty::Char | ty::Int(_) |
ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string().into(),
ty::Tuple(ref tys) if tys.is_empty() => self.to_string().into(),
ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)).into(),
ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n).into(),
None => "array".into(),
}
}
ty::Slice(_) => "slice".into(),
ty::RawPtr(_) => "*-ptr".into(),
ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
tymut_string.len() > 10 || //name longer than saying "reference",
region.to_string() != "" //... or a complex type
{
format!("{}reference", match mutbl {
hir::Mutability::MutMutable => "mutable ",
_ => ""
}).into()
} else {
format!("&{}", tymut_string).into()
}
}
ty::FnDef(..) => "fn item".into(),
ty::FnPtr(_) => "fn pointer".into(),
ty::Dynamic(ref inner,..) => {
format!("trait {}", tcx.item_path_str(inner.principal().def_id())).into()
}
ty::Closure(..) => "closure".into(),
ty::Generator(..) => "generator".into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integral variable".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(),
ty::Placeholder(..) => "placeholder type".into(),
ty::Bound(..) => "bound type".into(),
ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
ty::Projection(_) => "associated type".into(),
ty::UnnormalizedProjection(_) => "non-normalized associated type".into(),
ty::Param(ref p) => {
if p.is_self() {
"Self".into()
} else {
"type parameter".into()
}
}
ty::Opaque(..) => "opaque type".into(),
ty::Error => "type error".into(),
}
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn note_and_explain_type_err(self,
db: &mut DiagnosticBuilder<'_>,
err: &TypeError<'tcx>,
sp: Span) {
use self::TypeError::*;
match err.clone() {
Sorts(values) => {
let expected_str = values.expected.sort_string(self);
let found_str = values.found.sort_string(self);
if expected_str == found_str && expected_str == "closure" {
db.note("no two closures, even if identical, have the same type");
db.help("consider boxing your closure and/or using it as a trait object");
}
if let (ty::Infer(ty::IntVar(_)), ty::Float(_)) =
(&values.found.sty, &values.expected.sty) // Issue #53280
{
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
sp,
"use a float literal",
format!("{}.0", snippet),
Applicability::MachineApplicable
);
}
}
}
},
CyclicTy(ty) => {
// Watch out for various cases of cyclic types and try to explain.
if ty.is_closure() || ty.is_generator() {
db.note("closures cannot capture themselves or take themselves as argument;\n\
this error may be the result of a recent compiler bug-fix,\n\
see https://github.com/rust-lang/rust/issues/46062 for more details");
}
}
_ => {}
}
}
}
| {
write!(f, "lifetime mismatch")
} | conditional_block |
error.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use ty::{self, BoundRegion, Region, Ty, TyCtxt};
use std::borrow::Cow;
use std::fmt;
use rustc_target::spec::abi;
use syntax::ast;
use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::Span;
use hir;
#[derive(Clone, Copy, Debug)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
}
// Data structures used in type unification
#[derive(Clone, Debug)]
pub enum TypeError<'tcx> {
Mismatch,
UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
AbiMismatch(ExpectedFound<abi::Abi>),
Mutability,
TupleSize(ExpectedFound<usize>),
FixedArraySize(ExpectedFound<u64>),
ArgCount,
RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
Sorts(ExpectedFound<Ty<'tcx>>),
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
Traits(ExpectedFound<DefId>),
VariadicMismatch(ExpectedFound<bool>),
/// Instantiating a type variable with the given type would have
/// created a cycle (because it appears somewhere within that
/// type).
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
pub enum UnconstrainedNumeric {
UnconstrainedFloat,
UnconstrainedInt,
Neither,
}
/// Explains the source of a type err in a short, human readable way. This is meant to be placed | /// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
impl<'tcx> fmt::Display for TypeError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::TypeError::*;
fn report_maybe_different(f: &mut fmt::Formatter<'_>,
expected: &str, found: &str) -> fmt::Result {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
if expected == found {
write!(f, "expected {}, found a different {}", expected, found)
} else {
write!(f, "expected {}, found {}", expected, found)
}
}
match *self {
CyclicTy(_) => write!(f, "cyclic type of infinite size"),
Mismatch => write!(f, "types differ"),
UnsafetyMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
AbiMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
Mutability => write!(f, "types differ in mutability"),
FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.expected,
values.found)
}
TupleSize(values) => {
write!(f, "expected a tuple with {} elements, \
found one with {} elements",
values.expected,
values.found)
}
ArgCount => {
write!(f, "incorrect number of function parameters")
}
RegionsDoesNotOutlive(..) => {
write!(f, "lifetime mismatch")
}
RegionsInsufficientlyPolymorphic(br, _) => {
write!(f,
"expected bound lifetime parameter{}{}, found concrete lifetime",
if br.is_named() { " " } else { "" },
br)
}
RegionsOverlyPolymorphic(br, _) => {
write!(f,
"expected concrete lifetime, found bound lifetime parameter{}{}",
if br.is_named() { " " } else { "" },
br)
}
Sorts(values) => ty::tls::with(|tcx| {
report_maybe_different(f, &values.expected.sort_string(tcx),
&values.found.sort_string(tcx))
}),
Traits(values) => ty::tls::with(|tcx| {
report_maybe_different(f,
&format!("trait `{}`",
tcx.item_path_str(values.expected)),
&format!("trait `{}`",
tcx.item_path_str(values.found)))
}),
IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
FloatMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
VariadicMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
write!(f, "expected {}, found {}",
tcx.item_path_str(values.expected),
tcx.item_path_str(values.found))
}),
ProjectionBoundsLength(ref values) => {
write!(f, "expected {} associated type bindings, found {}",
values.expected,
values.found)
},
ExistentialMismatch(ref values) => {
report_maybe_different(f, &format!("trait `{}`", values.expected),
&format!("trait `{}`", values.found))
}
}
}
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> Cow<'static, str> {
match self.sty {
ty::Bool | ty::Char | ty::Int(_) |
ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string().into(),
ty::Tuple(ref tys) if tys.is_empty() => self.to_string().into(),
ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)).into(),
ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n).into(),
None => "array".into(),
}
}
ty::Slice(_) => "slice".into(),
ty::RawPtr(_) => "*-ptr".into(),
ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
tymut_string.len() > 10 || //name longer than saying "reference",
region.to_string() != "" //... or a complex type
{
format!("{}reference", match mutbl {
hir::Mutability::MutMutable => "mutable ",
_ => ""
}).into()
} else {
format!("&{}", tymut_string).into()
}
}
ty::FnDef(..) => "fn item".into(),
ty::FnPtr(_) => "fn pointer".into(),
ty::Dynamic(ref inner,..) => {
format!("trait {}", tcx.item_path_str(inner.principal().def_id())).into()
}
ty::Closure(..) => "closure".into(),
ty::Generator(..) => "generator".into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integral variable".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(),
ty::Placeholder(..) => "placeholder type".into(),
ty::Bound(..) => "bound type".into(),
ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
ty::Projection(_) => "associated type".into(),
ty::UnnormalizedProjection(_) => "non-normalized associated type".into(),
ty::Param(ref p) => {
if p.is_self() {
"Self".into()
} else {
"type parameter".into()
}
}
ty::Opaque(..) => "opaque type".into(),
ty::Error => "type error".into(),
}
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn note_and_explain_type_err(self,
db: &mut DiagnosticBuilder<'_>,
err: &TypeError<'tcx>,
sp: Span) {
use self::TypeError::*;
match err.clone() {
Sorts(values) => {
let expected_str = values.expected.sort_string(self);
let found_str = values.found.sort_string(self);
if expected_str == found_str && expected_str == "closure" {
db.note("no two closures, even if identical, have the same type");
db.help("consider boxing your closure and/or using it as a trait object");
}
if let (ty::Infer(ty::IntVar(_)), ty::Float(_)) =
(&values.found.sty, &values.expected.sty) // Issue #53280
{
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
sp,
"use a float literal",
format!("{}.0", snippet),
Applicability::MachineApplicable
);
}
}
}
},
CyclicTy(ty) => {
// Watch out for various cases of cyclic types and try to explain.
if ty.is_closure() || ty.is_generator() {
db.note("closures cannot capture themselves or take themselves as argument;\n\
this error may be the result of a recent compiler bug-fix,\n\
see https://github.com/rust-lang/rust/issues/46062 for more details");
}
}
_ => {}
}
}
} | random_line_split |
|
error.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use ty::{self, BoundRegion, Region, Ty, TyCtxt};
use std::borrow::Cow;
use std::fmt;
use rustc_target::spec::abi;
use syntax::ast;
use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::Span;
use hir;
#[derive(Clone, Copy, Debug)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
}
// Data structures used in type unification
#[derive(Clone, Debug)]
pub enum TypeError<'tcx> {
Mismatch,
UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
AbiMismatch(ExpectedFound<abi::Abi>),
Mutability,
TupleSize(ExpectedFound<usize>),
FixedArraySize(ExpectedFound<u64>),
ArgCount,
RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
Sorts(ExpectedFound<Ty<'tcx>>),
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
Traits(ExpectedFound<DefId>),
VariadicMismatch(ExpectedFound<bool>),
/// Instantiating a type variable with the given type would have
/// created a cycle (because it appears somewhere within that
/// type).
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
pub enum UnconstrainedNumeric {
UnconstrainedFloat,
UnconstrainedInt,
Neither,
}
/// Explains the source of a type err in a short, human readable way. This is meant to be placed
/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
impl<'tcx> fmt::Display for TypeError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::TypeError::*;
fn report_maybe_different(f: &mut fmt::Formatter<'_>,
expected: &str, found: &str) -> fmt::Result {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
if expected == found {
write!(f, "expected {}, found a different {}", expected, found)
} else {
write!(f, "expected {}, found {}", expected, found)
}
}
match *self {
CyclicTy(_) => write!(f, "cyclic type of infinite size"),
Mismatch => write!(f, "types differ"),
UnsafetyMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
AbiMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
Mutability => write!(f, "types differ in mutability"),
FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.expected,
values.found)
}
TupleSize(values) => {
write!(f, "expected a tuple with {} elements, \
found one with {} elements",
values.expected,
values.found)
}
ArgCount => {
write!(f, "incorrect number of function parameters")
}
RegionsDoesNotOutlive(..) => {
write!(f, "lifetime mismatch")
}
RegionsInsufficientlyPolymorphic(br, _) => {
write!(f,
"expected bound lifetime parameter{}{}, found concrete lifetime",
if br.is_named() { " " } else { "" },
br)
}
RegionsOverlyPolymorphic(br, _) => {
write!(f,
"expected concrete lifetime, found bound lifetime parameter{}{}",
if br.is_named() { " " } else { "" },
br)
}
Sorts(values) => ty::tls::with(|tcx| {
report_maybe_different(f, &values.expected.sort_string(tcx),
&values.found.sort_string(tcx))
}),
Traits(values) => ty::tls::with(|tcx| {
report_maybe_different(f,
&format!("trait `{}`",
tcx.item_path_str(values.expected)),
&format!("trait `{}`",
tcx.item_path_str(values.found)))
}),
IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
FloatMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
VariadicMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
write!(f, "expected {}, found {}",
tcx.item_path_str(values.expected),
tcx.item_path_str(values.found))
}),
ProjectionBoundsLength(ref values) => {
write!(f, "expected {} associated type bindings, found {}",
values.expected,
values.found)
},
ExistentialMismatch(ref values) => {
report_maybe_different(f, &format!("trait `{}`", values.expected),
&format!("trait `{}`", values.found))
}
}
}
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> Cow<'static, str> {
match self.sty {
ty::Bool | ty::Char | ty::Int(_) |
ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string().into(),
ty::Tuple(ref tys) if tys.is_empty() => self.to_string().into(),
ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)).into(),
ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n).into(),
None => "array".into(),
}
}
ty::Slice(_) => "slice".into(),
ty::RawPtr(_) => "*-ptr".into(),
ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
tymut_string.len() > 10 || //name longer than saying "reference",
region.to_string() != "" //... or a complex type
{
format!("{}reference", match mutbl {
hir::Mutability::MutMutable => "mutable ",
_ => ""
}).into()
} else {
format!("&{}", tymut_string).into()
}
}
ty::FnDef(..) => "fn item".into(),
ty::FnPtr(_) => "fn pointer".into(),
ty::Dynamic(ref inner,..) => {
format!("trait {}", tcx.item_path_str(inner.principal().def_id())).into()
}
ty::Closure(..) => "closure".into(),
ty::Generator(..) => "generator".into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integral variable".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(),
ty::Placeholder(..) => "placeholder type".into(),
ty::Bound(..) => "bound type".into(),
ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
ty::Projection(_) => "associated type".into(),
ty::UnnormalizedProjection(_) => "non-normalized associated type".into(),
ty::Param(ref p) => {
if p.is_self() {
"Self".into()
} else {
"type parameter".into()
}
}
ty::Opaque(..) => "opaque type".into(),
ty::Error => "type error".into(),
}
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn | (self,
db: &mut DiagnosticBuilder<'_>,
err: &TypeError<'tcx>,
sp: Span) {
use self::TypeError::*;
match err.clone() {
Sorts(values) => {
let expected_str = values.expected.sort_string(self);
let found_str = values.found.sort_string(self);
if expected_str == found_str && expected_str == "closure" {
db.note("no two closures, even if identical, have the same type");
db.help("consider boxing your closure and/or using it as a trait object");
}
if let (ty::Infer(ty::IntVar(_)), ty::Float(_)) =
(&values.found.sty, &values.expected.sty) // Issue #53280
{
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
sp,
"use a float literal",
format!("{}.0", snippet),
Applicability::MachineApplicable
);
}
}
}
},
CyclicTy(ty) => {
// Watch out for various cases of cyclic types and try to explain.
if ty.is_closure() || ty.is_generator() {
db.note("closures cannot capture themselves or take themselves as argument;\n\
this error may be the result of a recent compiler bug-fix,\n\
see https://github.com/rust-lang/rust/issues/46062 for more details");
}
}
_ => {}
}
}
}
| note_and_explain_type_err | identifier_name |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command; | let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
} | use std::env;
use std::str;
fn main() { | random_line_split |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command;
use std::env;
use std::str;
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" |
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
}
| {
debug!("foo");
debug!("bar");
return
} | conditional_block |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command;
use std::env;
use std::str;
fn | () {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
}
| main | identifier_name |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command;
use std::env;
use std::str;
fn main() | {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
} | identifier_body |
|
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener;
use std::net::TcpStream;
use std::io::{self, Read, Write};
fn | (stream: TcpStream) -> io::Result<()> {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
}
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming {
handle_client(stream);
}
}
}
}
| handle_client | identifier_name |
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener;
use std::net::TcpStream;
use std::io::{self, Read, Write};
fn handle_client(stream: TcpStream) -> io::Result<()> |
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming {
handle_client(stream);
}
}
}
}
| {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
} | identifier_body |
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener; | use std::net::TcpStream;
use std::io::{self, Read, Write};
fn handle_client(stream: TcpStream) -> io::Result<()> {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
}
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming {
handle_client(stream);
}
}
}
} | random_line_split |
|
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener;
use std::net::TcpStream;
use std::io::{self, Read, Write};
fn handle_client(stream: TcpStream) -> io::Result<()> {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
}
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming |
}
}
}
| {
handle_client(stream);
} | conditional_block |
util.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::uintptr_t;
use option::Option;
use option::Option::{Some, None};
use os;
use str::{FromStr, from_str, Str};
use sync::atomic;
/// Dynamically inquire about whether we're running under V.
/// You should usually not use this unless your test definitely
/// can't run correctly un-altered. Valgrind is there to help
/// you notice weirdness in normal, un-doctored code paths!
pub fn running_on_valgrind() -> bool {
extern {
fn rust_running_on_valgrind() -> uintptr_t;
}
unsafe { rust_running_on_valgrind() != 0 }
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool |
pub fn min_stack() -> uint {
static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match MIN.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = os::getenv("RUST_MIN_STACK").and_then(|s| from_str(s.as_slice()));
let amt = amt.unwrap_or(2 * 1024 * 1024);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
MIN.store(amt + 1, atomic::SeqCst);
return amt;
}
/// Gets the number of scheduler threads requested by the environment
/// either `RUST_THREADS` or `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr.as_slice());
match opt_n {
Some(n) if n > 0 => n,
_ => panic!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
os::num_cpus()
}
}
}
}
| {
(cfg!(target_os="macos")) && running_on_valgrind()
} | identifier_body |
util.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::uintptr_t;
use option::Option;
use option::Option::{Some, None};
use os;
use str::{FromStr, from_str, Str};
use sync::atomic;
/// Dynamically inquire about whether we're running under V.
/// You should usually not use this unless your test definitely
/// can't run correctly un-altered. Valgrind is there to help
/// you notice weirdness in normal, un-doctored code paths!
pub fn running_on_valgrind() -> bool {
extern {
fn rust_running_on_valgrind() -> uintptr_t;
}
unsafe { rust_running_on_valgrind() != 0 }
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
(cfg!(target_os="macos")) && running_on_valgrind()
}
pub fn min_stack() -> uint {
static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match MIN.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = os::getenv("RUST_MIN_STACK").and_then(|s| from_str(s.as_slice()));
let amt = amt.unwrap_or(2 * 1024 * 1024); | // 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
MIN.store(amt + 1, atomic::SeqCst);
return amt;
}
/// Gets the number of scheduler threads requested by the environment
/// via either `RUST_THREADS` or `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr.as_slice());
match opt_n {
Some(n) if n > 0 => n,
_ => panic!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
os::num_cpus()
}
}
}
} | random_line_split |
|
util.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::uintptr_t;
use option::Option;
use option::Option::{Some, None};
use os;
use str::{FromStr, from_str, Str};
use sync::atomic;
/// Dynamically inquire about whether we're running under V.
/// You should usually not use this unless your test definitely
/// can't run correctly un-altered. Valgrind is there to help
/// you notice weirdness in normal, un-doctored code paths!
pub fn running_on_valgrind() -> bool {
extern {
fn rust_running_on_valgrind() -> uintptr_t;
}
unsafe { rust_running_on_valgrind() != 0 }
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
(cfg!(target_os="macos")) && running_on_valgrind()
}
pub fn min_stack() -> uint {
static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match MIN.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = os::getenv("RUST_MIN_STACK").and_then(|s| from_str(s.as_slice()));
let amt = amt.unwrap_or(2 * 1024 * 1024);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
MIN.store(amt + 1, atomic::SeqCst);
return amt;
}
/// Gets the number of scheduler threads requested by the environment
/// via either `RUST_THREADS` or `num_cpus`.
pub fn | () -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr.as_slice());
match opt_n {
Some(n) if n > 0 => n,
_ => panic!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
os::num_cpus()
}
}
}
}
| default_sched_threads | identifier_name |
lib.rs | use std::collections::HashMap;
use std::fmt::Display;
use std::path::Path;
use std::sync::Arc;
use log::debug;
use serde::{Deserialize, Serialize};
mod eval;
pub mod util;
#[derive(Clone, Serialize, Deserialize, Default, PartialEq, Debug)]
struct EvalServiceCfg {
timeout: usize,
languages: HashMap<String, LanguageCfg>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
struct LanguageCfg {
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
#[serde(flatten)]
backend: BackendCfg,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
#[serde(untagged)]
enum BackendCfg {
Exec(ExecBackend),
Network(NetworkBackend),
UnixSocket(UnixSocketBackend),
}
#[derive(Clone, Debug)]
pub struct EvalService {
timeout: usize,
languages: HashMap<String, Arc<Language>>,
}
#[derive(Clone, PartialEq, Debug)]
pub struct Language {
name: String,
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
backend: Backend,
}
#[derive(Clone, PartialEq, Debug)]
enum Backend {
Exec(Arc<ExecBackend>),
Network(Arc<NetworkBackend>),
UnixSocket(Arc<UnixSocketBackend>),
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct ExecBackend {
cmdline: Vec<String>,
timeout_prefix: Option<String>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct NetworkBackend {
network_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct UnixSocketBackend {
socket_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
impl Language {
fn from(name: String, default_timeout: usize, cfg: LanguageCfg) -> Self {
Language {
name,
code_before: cfg.code_before,
code_after: cfg.code_after,
timeout: cfg.timeout.or_else(|| Some(default_timeout)),
backend: match cfg.backend {
BackendCfg::Exec(x) => Backend::Exec(Arc::new(x)),
BackendCfg::Network(x) => Backend::Network(Arc::new(x)),
BackendCfg::UnixSocket(x) => Backend::UnixSocket(Arc::new(x)),
},
}
}
}
impl EvalService {
fn fixup(cfg: EvalServiceCfg) -> Self {
debug!("Loaded config: {:#?}", cfg);
let mut new = EvalService {
timeout: cfg.timeout,
languages: HashMap::new(),
};
let timeout = cfg.timeout;
for (name, lang) in cfg.languages.into_iter() {
new.languages
.insert(name.clone(), Arc::new(Language::from(name, timeout, lang)));
}
new
}
pub async fn from_toml_file<P>(path: P) -> Result<Self, String>
where
P: AsRef<Path> + Send + Display + 'static,
{
Ok(EvalService::fixup(util::decode(path).await?))
}
pub fn from_toml(toml: &str) -> Result<Self, String> {
toml::from_str(toml)
.map(EvalService::fixup)
.map_err(|x| format!("could not parse TOML: {:?}", x))
}
pub fn langs(&self) -> impl Iterator<Item = (&str, &Arc<Language>)> {
self.languages.iter().map(|(n, l)| (n.as_str(), l))
}
pub fn get(&self, lang: &str) -> Option<&Arc<Language>> {
self.languages.get(lang)
}
}
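// Illustrative usage sketch (added; not part of the original crate). The TOML
// shape mirrors `EvalServiceCfg`/`LanguageCfg` above and matches the
// `test_decode` test at the bottom of this file.
#[cfg(test)]
mod usage_sketch {
#[test]
fn toml_parse_and_lookup() {
let toml = r#"
timeout = 10
[languages.rs]
cmdline = ["rustc", "-O"]
"#;
let svc = super::EvalService::from_toml(toml).expect("config should parse");
assert!(svc.get("rs").is_some());
assert!(svc.get("py").is_none());
}
}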
static EMPTY_U8: [u8; 0] = [];
| context: Option<U>,
) -> Result<String, String>
where
T: AsRef<str>,
U: AsRef<str>,
{
debug!("evaluating {}: \"{}\"", self.name, code.as_ref());
let timeout = match timeout {
Some(0) => None,
Some(n) => Some(n),
None => self.timeout,
};
match self.backend {
Backend::Exec(ref lang) => {
eval::exec(lang.clone(), timeout, self.wrap_code(code.as_ref())).await
}
Backend::UnixSocket(ref lang) => {
eval::unix(
lang.clone(),
timeout,
context.map(|x| x.as_ref().to_owned()), // FIXME copy :(
self.wrap_code(code.as_ref()),
)
.await
}
_ => Ok("Unimplemented".to_owned()),
}
}
fn wrap_code(&self, raw: &str) -> String {
let mut code = String::with_capacity(raw.len());
if let Some(ref prefix) = self.code_before {
code.push_str(prefix);
}
code.push_str(raw);
if let Some(ref postfix) = self.code_after {
code.push_str(postfix);
}
code
}
}
#[cfg(test)]
mod test {
#[test]
fn test_decode() {
let toml = r#"
timeout = 20
[languages.rs]
cmdline = ["rustc", "-O"]
[languages.'rs!']
timeout = 0
cmdline = ["rustc", "-O"]
"#;
println!("{:#?}", super::EvalService::from_toml(toml).unwrap());
}
} | impl Language {
pub async fn eval<T, U>(
&self,
code: T,
timeout: Option<usize>, | random_line_split |
lib.rs | use std::collections::HashMap;
use std::fmt::Display;
use std::path::Path;
use std::sync::Arc;
use log::debug;
use serde::{Deserialize, Serialize};
mod eval;
pub mod util;
#[derive(Clone, Serialize, Deserialize, Default, PartialEq, Debug)]
struct EvalServiceCfg {
timeout: usize,
languages: HashMap<String, LanguageCfg>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
struct LanguageCfg {
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
#[serde(flatten)]
backend: BackendCfg,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
#[serde(untagged)]
enum | {
Exec(ExecBackend),
Network(NetworkBackend),
UnixSocket(UnixSocketBackend),
}
#[derive(Clone, Debug)]
pub struct EvalService {
timeout: usize,
languages: HashMap<String, Arc<Language>>,
}
#[derive(Clone, PartialEq, Debug)]
pub struct Language {
name: String,
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
backend: Backend,
}
#[derive(Clone, PartialEq, Debug)]
enum Backend {
Exec(Arc<ExecBackend>),
Network(Arc<NetworkBackend>),
UnixSocket(Arc<UnixSocketBackend>),
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct ExecBackend {
cmdline: Vec<String>,
timeout_prefix: Option<String>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct NetworkBackend {
network_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct UnixSocketBackend {
socket_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
impl Language {
fn from(name: String, default_timeout: usize, cfg: LanguageCfg) -> Self {
Language {
name,
code_before: cfg.code_before,
code_after: cfg.code_after,
timeout: cfg.timeout.or_else(|| Some(default_timeout)),
backend: match cfg.backend {
BackendCfg::Exec(x) => Backend::Exec(Arc::new(x)),
BackendCfg::Network(x) => Backend::Network(Arc::new(x)),
BackendCfg::UnixSocket(x) => Backend::UnixSocket(Arc::new(x)),
},
}
}
}
impl EvalService {
fn fixup(cfg: EvalServiceCfg) -> Self {
debug!("Loaded config: {:#?}", cfg);
let mut new = EvalService {
timeout: cfg.timeout,
languages: HashMap::new(),
};
let timeout = cfg.timeout;
for (name, lang) in cfg.languages.into_iter() {
new.languages
.insert(name.clone(), Arc::new(Language::from(name, timeout, lang)));
}
new
}
pub async fn from_toml_file<P>(path: P) -> Result<Self, String>
where
P: AsRef<Path> + Send + Display + 'static,
{
Ok(EvalService::fixup(util::decode(path).await?))
}
pub fn from_toml(toml: &str) -> Result<Self, String> {
toml::from_str(toml)
.map(EvalService::fixup)
.map_err(|x| format!("could not parse TOML: {:?}", x))
}
pub fn langs(&self) -> impl Iterator<Item = (&str, &Arc<Language>)> {
self.languages.iter().map(|(n, l)| (n.as_str(), l))
}
pub fn get(&self, lang: &str) -> Option<&Arc<Language>> {
self.languages.get(lang)
}
}
static EMPTY_U8: [u8; 0] = [];
impl Language {
pub async fn eval<T, U>(
&self,
code: T,
timeout: Option<usize>,
context: Option<U>,
) -> Result<String, String>
where
T: AsRef<str>,
U: AsRef<str>,
{
debug!("evaluating {}: \"{}\"", self.name, code.as_ref());
let timeout = match timeout {
Some(0) => None,
Some(n) => Some(n),
None => self.timeout,
};
match self.backend {
Backend::Exec(ref lang) => {
eval::exec(lang.clone(), timeout, self.wrap_code(code.as_ref())).await
}
Backend::UnixSocket(ref lang) => {
eval::unix(
lang.clone(),
timeout,
context.map(|x| x.as_ref().to_owned()), // FIXME copy :(
self.wrap_code(code.as_ref()),
)
.await
}
_ => Ok("Unimplemented".to_owned()),
}
}
fn wrap_code(&self, raw: &str) -> String {
let mut code = String::with_capacity(raw.len());
if let Some(ref prefix) = self.code_before {
code.push_str(prefix);
}
code.push_str(raw);
if let Some(ref postfix) = self.code_after {
code.push_str(postfix);
}
code
}
}
#[cfg(test)]
mod test {
#[test]
fn test_decode() {
let toml = r#"
timeout = 20
[languages.rs]
cmdline = ["rustc", "-O"]
[languages.'rs!']
timeout = 0
cmdline = ["rustc", "-O"]
"#;
println!("{:#?}", super::EvalService::from_toml(toml).unwrap());
}
}
| BackendCfg | identifier_name |
tilemap.rs | extern crate gl;
extern crate nalgebra;
use gl::types::*;
use nalgebra::na::{Mat4};
use nalgebra::na;
use std::mem;
use std::ptr;
use super::engine;
use super::shader;
use super::math;
//static CHUNK_SIZE : u8 = 10;
pub struct TilemapChunk
{
shader :shader::ShaderProgram,
vao : u32,
vbo_vertices : u32,
vbo_indices: u32,
vbo_tileid : u32,
indices_count : u32
//save model matrix
//hold ref/owned to TilemapChunkData logical part?
// tile_texture_atlas
// tile_texture_atlas_normal? <- normal map for tiles?
}
impl TilemapChunk
{
pub fn new() -> TilemapChunk
{
TilemapChunk {
shader: shader::ShaderProgram::new(),
vao: 0,
vbo_vertices: 0,
vbo_indices: 0,
indices_count: 0,
vbo_tileid: 0, //vbo for uv(texture) coordinates?
}
}
// tile_count_x: how many tiles on the x axis
// tile_count_y: how many tiles on the y axis
pub fn setup(&mut self, tile_count_x: u32, tile_count_y: u32)
{
//create dummy tile layout
let mut tilemap_chunk_vertices : Vec<GLfloat> = Vec::new();
let mut tilemap_chunk_indices : Vec<GLuint> = Vec::new();
//create the grid vertices
//create tile plane vertices
for i in range(0u32, (tile_count_x+1)*(tile_count_y+1))
{
let x = i % (tile_count_x+1); //first this counts up (column)
let y = i / (tile_count_x+1); //then this counts up (row)
tilemap_chunk_vertices.push(0.0+x as f32);
tilemap_chunk_vertices.push(0.0+y as f32);
//println!("vertex[{}]: {}, {}", i, x, y);
//calculate indices for the triangles
//indices are related to vertex indices not the vector index
//where each vertex has 2 entries
if x < tile_count_x
&& y < tile_count_y
{
let index_of = |x :u32, y:u32| x + (y * (tile_count_x+1));
//requires 2 triangles per tile (quad)
tilemap_chunk_indices.push(i); //index of (x,y)
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x, y+1));
//println!("\ttriangle_one: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
tilemap_chunk_indices.push(index_of(x, y+1));
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x+1, y+1));
//println!("\ttriangle_two: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
}
}
self.indices_count = tilemap_chunk_indices.len() as u32;
//println!("tilemap::setup() Count of vertices: {}", tilemap_chunk_vertices.len()/2); //x,y so /2
//println!("tilemap::setup() Count of indices: {}", self.indices_count);
//println!("tilemap::setup() vertices: {}", tilemap_chunk_vertices);
//TODO shader config elsewhere?
self.shader.add_shader_file("./data/client/shader/tilemap.vs.glsl", gl::VERTEX_SHADER);
self.shader.add_shader_file("./data/client/shader/tilemap.fs.glsl", gl::FRAGMENT_SHADER);
self.shader.set_fragment_name("fragColor"); //required before linking
self.shader.link_program();
self.shader.use_program();
unsafe
{
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut self.vao);
gl::BindVertexArray(self.vao);
// Create a Vertex Buffer Object and copy the vertex data to it
gl::GenBuffers(1, &mut self.vbo_vertices);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_vertices);
gl::BufferData(gl::ARRAY_BUFFER,
(tilemap_chunk_vertices.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
tilemap_chunk_vertices.as_ptr() as *const GLvoid,
gl::STATIC_DRAW);
// Specify the layout of the vertex data
let vertex_attr = self.shader.get_attrib("my_vertex");
gl::EnableVertexAttribArray(vertex_attr as GLuint);
gl::VertexAttribPointer(vertex_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
//vertex indices
gl::GenBuffers(1, &mut self.vbo_indices);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.vbo_indices);
gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, (tilemap_chunk_indices.len() * mem::size_of::<GLuint>()) as GLsizeiptr,
tilemap_chunk_indices.as_ptr() as *const GLvoid, gl::STATIC_DRAW);
//bind uniform
//disable all?:
//glBindVertexArray(0);
//glDisableVertexAttribArray
//gl::BindBuffer(*, 0)? for both?
}
}
| /*
fn set_program_variable_vbo(&self, name: &str)
{
//in
}
*/
//move to shader?
fn set_program_uniform_mat4(&self, name: &str, m: &Mat4<f32>)
{
//self.shader
let id = self.shader.get_uniform(name);
unsafe {
gl::UniformMatrix4fv(id, 1, gl::FALSE as u8, mem::transmute(m));
}
}
}
impl engine::Drawable for TilemapChunk
{
fn draw(&self, rc: &engine::RenderContext)
{
//use shader
self.shader.use_program();
let mut model : Mat4<f32> = na::zero();
math::set_identity(&mut model);
//set uniform
//let mvp = /*rc.projm **/ rc.view;
let mvp = rc.projm * rc.view * model;
self.set_program_uniform_mat4("mvp", &mvp);
//bind vao
gl::BindVertexArray(self.vao);
//render
//gl::DrawArrays(gl::TRIANGLES, 0, self.indices_count as i32);
//with indices DrawElements must be used
unsafe {
gl::DrawElements(gl::TRIANGLE_STRIP, self.indices_count as i32, gl::UNSIGNED_INT, ptr::null());
}
//GL_TRIANGLE_STRIP?
//disable all
}
} | random_line_split |
|
tilemap.rs |
extern crate gl;
extern crate nalgebra;
use gl::types::*;
use nalgebra::na::{Mat4};
use nalgebra::na;
use std::mem;
use std::ptr;
use super::engine;
use super::shader;
use super::math;
//static CHUNK_SIZE : u8 = 10;
pub struct TilemapChunk
{
shader :shader::ShaderProgram,
vao : u32,
vbo_vertices : u32,
vbo_indices: u32,
vbo_tileid : u32,
indices_count : u32
//save model matrix
//hold ref/owned to TilemapChunkData logical part?
// tile_texture_atlas
// tile_texture_atlas_normal? <- normal map for tiles?
}
impl TilemapChunk
{
pub fn new() -> TilemapChunk
{
TilemapChunk {
shader: shader::ShaderProgram::new(),
vao: 0,
vbo_vertices: 0,
vbo_indices: 0,
indices_count: 0,
vbo_tileid: 0, //vbo for uv(texture) coordinates?
}
}
// tile_count_x: how many tiles on the x axis
// tile_count_y: how many tiles on the y axis
pub fn setup(&mut self, tile_count_x: u32, tile_count_y: u32)
{
//create dummy tile layout
let mut tilemap_chunk_vertices : Vec<GLfloat> = Vec::new();
let mut tilemap_chunk_indices : Vec<GLuint> = Vec::new();
//create the grid vertices
//create tile plane vertices
for i in range(0u32, (tile_count_x+1)*(tile_count_y+1))
{
let x = i % (tile_count_x+1); //first this counts up (column)
let y = i / (tile_count_x+1); //then this counts up (row)
tilemap_chunk_vertices.push(0.0+x as f32);
tilemap_chunk_vertices.push(0.0+y as f32);
//println!("vertex[{}]: {}, {}", i, x, y);
//calculate indices for the triangles
//indices are related to vertex indices not the vector index
//where each vertex has 2 entries
if x < tile_count_x
&& y < tile_count_y
|
}
self.indices_count = tilemap_chunk_indices.len() as u32;
//println!("tilemap::setup() Count of vertices: {}", tilemap_chunk_vertices.len()/2); //x,y so /2
//println!("tilemap::setup() Count of indices: {}", self.indices_count);
//println!("tilemap::setup() vertices: {}", tilemap_chunk_vertices);
//TODO shader config elsewhere?
self.shader.add_shader_file("./data/client/shader/tilemap.vs.glsl", gl::VERTEX_SHADER);
self.shader.add_shader_file("./data/client/shader/tilemap.fs.glsl", gl::FRAGMENT_SHADER);
self.shader.set_fragment_name("fragColor"); //required before linking
self.shader.link_program();
self.shader.use_program();
unsafe
{
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut self.vao);
gl::BindVertexArray(self.vao);
// Create a Vertex Buffer Object and copy the vertex data to it
gl::GenBuffers(1, &mut self.vbo_vertices);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_vertices);
gl::BufferData(gl::ARRAY_BUFFER,
(tilemap_chunk_vertices.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
tilemap_chunk_vertices.as_ptr() as *const GLvoid,
gl::STATIC_DRAW);
// Specify the layout of the vertex data
let vertex_attr = self.shader.get_attrib("my_vertex");
gl::EnableVertexAttribArray(vertex_attr as GLuint);
gl::VertexAttribPointer(vertex_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
//vertex indices
gl::GenBuffers(1, &mut self.vbo_indices);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.vbo_indices);
gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, (tilemap_chunk_indices.len() * mem::size_of::<GLuint>()) as GLsizeiptr,
tilemap_chunk_indices.as_ptr() as *const GLvoid, gl::STATIC_DRAW);
//bind uniform
//disable all?:
//glBindVertexArray(0);
//glDisableVertexAttribArray
//gl::BindBuffer(*, 0)? for both?
}
}
/*
fn set_program_variable_vbo(&self, name: &str)
{
//in
}
*/
//move to shader?
fn set_program_uniform_mat4(&self, name: &str, m: &Mat4<f32>)
{
//self.shader
let id = self.shader.get_uniform(name);
unsafe {
gl::UniformMatrix4fv(id, 1, gl::FALSE as u8, mem::transmute(m));
}
}
}
impl engine::Drawable for TilemapChunk
{
fn draw(&self, rc: &engine::RenderContext)
{
//use shader
self.shader.use_program();
let mut model : Mat4<f32> = na::zero();
math::set_identity(&mut model);
//set uniform
//let mvp = /*rc.projm **/ rc.view;
let mvp = rc.projm * rc.view * model;
self.set_program_uniform_mat4("mvp", &mvp);
//bind vao
gl::BindVertexArray(self.vao);
//render
//gl::DrawArrays(gl::TRIANGLES, 0, self.indices_count as i32);
//with indices DrawElements must be used
unsafe {
gl::DrawElements(gl::TRIANGLE_STRIP, self.indices_count as i32, gl::UNSIGNED_INT, ptr::null());
}
//GL_TRIANGLE_STRIP?
//disable all
}
}
| {
let index_of = |x :u32, y:u32| x + (y * (tile_count_x+1));
//requires 2 triangles per tile (quad)
tilemap_chunk_indices.push(i); //index of (x,y)
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x, y+1));
//println!("\ttriangle_one: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
tilemap_chunk_indices.push(index_of(x, y+1));
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x+1, y+1));
//println!("\ttriangle_two: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
} | conditional_block |
tilemap.rs |
extern crate gl;
extern crate nalgebra;
use gl::types::*;
use nalgebra::na::{Mat4};
use nalgebra::na;
use std::mem;
use std::ptr;
use super::engine;
use super::shader;
use super::math;
//static CHUNK_SIZE : u8 = 10;
pub struct TilemapChunk
{
shader :shader::ShaderProgram,
vao : u32,
vbo_vertices : u32,
vbo_indices: u32,
vbo_tileid : u32,
indices_count : u32
//save model matrix
//hold ref/owned to TilemapChunkData logical part?
// tile_texture_atlas
// tile_texture_atlas_normal? <- normal map for tiles?
}
impl TilemapChunk
{
pub fn new() -> TilemapChunk
{
TilemapChunk {
shader: shader::ShaderProgram::new(),
vao: 0,
vbo_vertices: 0,
vbo_indices: 0,
indices_count: 0,
vbo_tileid: 0, //vbo for uv(texture) coordinates?
}
}
// tile_count_x: how many tiles on the x axis
// tile_count_y: how many tiles on the y axis
pub fn setup(&mut self, tile_count_x: u32, tile_count_y: u32)
{
//create dummy tile layout
let mut tilemap_chunk_vertices : Vec<GLfloat> = Vec::new();
let mut tilemap_chunk_indices : Vec<GLuint> = Vec::new();
//create the grid vertices
//create tile plane vertices
for i in range(0u32, (tile_count_x+1)*(tile_count_y+1))
{
let x = i % (tile_count_x+1); //first this counts up (column)
let y = i / (tile_count_x+1); //then this counts up (row)
tilemap_chunk_vertices.push(0.0+x as f32);
tilemap_chunk_vertices.push(0.0+y as f32);
//println!("vertex[{}]: {}, {}", i, x, y);
//calculate indices for the triangles
//indices are related to vertex indices not the vector index
//where each vertex has 2 entries
if x < tile_count_x
&& y < tile_count_y
{
let index_of = |x :u32, y:u32| x + (y * (tile_count_x+1));
//requires 2 triangles per tile (quad)
tilemap_chunk_indices.push(i); //index of (x,y)
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x, y+1));
//println!("\ttriangle_one: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
tilemap_chunk_indices.push(index_of(x, y+1));
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x+1, y+1));
//println!("\ttriangle_two: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
}
}
self.indices_count = tilemap_chunk_indices.len() as u32;
//println!("tilemap::setup() Count of vertices: {}", tilemap_chunk_vertices.len()/2); //x,y so /2
//println!("tilemap::setup() Count of indices: {}", self.indices_count);
//println!("tilemap::setup() vertices: {}", tilemap_chunk_vertices);
//TODO shader config elsewhere?
self.shader.add_shader_file("./data/client/shader/tilemap.vs.glsl", gl::VERTEX_SHADER);
self.shader.add_shader_file("./data/client/shader/tilemap.fs.glsl", gl::FRAGMENT_SHADER);
self.shader.set_fragment_name("fragColor"); //required before linking
self.shader.link_program();
self.shader.use_program();
unsafe
{
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut self.vao);
gl::BindVertexArray(self.vao);
// Create a Vertex Buffer Object and copy the vertex data to it
gl::GenBuffers(1, &mut self.vbo_vertices);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_vertices);
gl::BufferData(gl::ARRAY_BUFFER,
(tilemap_chunk_vertices.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
tilemap_chunk_vertices.as_ptr() as *const GLvoid,
gl::STATIC_DRAW);
// Specify the layout of the vertex data
let vertex_attr = self.shader.get_attrib("my_vertex");
gl::EnableVertexAttribArray(vertex_attr as GLuint);
gl::VertexAttribPointer(vertex_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
//vertex indices
gl::GenBuffers(1, &mut self.vbo_indices);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.vbo_indices);
gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, (tilemap_chunk_indices.len() * mem::size_of::<GLuint>()) as GLsizeiptr,
tilemap_chunk_indices.as_ptr() as *const GLvoid, gl::STATIC_DRAW);
//bind uniform
//disable all?:
//glBindVertexArray(0);
//glDisableVertexAttribArray
//gl::BindBuffer(*, 0)? for both?
}
}
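// Added sanity note (not in the original source): for tile_count_x =
// tile_count_y = 2 the loop above emits (2+1)*(2+1) = 9 grid vertices
// (18 GLfloats) and 2*2 tiles * 2 triangles * 3 = 24 indices; the first tile
// uses vertex indices [0, 1, 3, 3, 1, 4].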
/*
fn set_program_variable_vbo(&self, name: &str)
{
//in
}
*/
//move to shader?
fn set_program_uniform_mat4(&self, name: &str, m: &Mat4<f32>)
|
}
impl engine::Drawable for TilemapChunk
{
fn draw(&self, rc: &engine::RenderContext)
{
//use shader
self.shader.use_program();
let mut model : Mat4<f32> = na::zero();
math::set_identity(&mut model);
//set uniform
//let mvp = /*rc.projm **/ rc.view;
let mvp = rc.projm * rc.view * model;
self.set_program_uniform_mat4("mvp", &mvp);
//bind vao
gl::BindVertexArray(self.vao);
//render
//gl::DrawArrays(gl::TRIANGLES, 0, self.indices_count as i32);
//with indices DrawElements must be used
unsafe {
gl::DrawElements(gl::TRIANGLE_STRIP, self.indices_count as i32, gl::UNSIGNED_INT, ptr::null());
}
//GL_TRIANGLE_STRIP?
//disable all
}
}
| {
//self.shader
let id = self.shader.get_uniform(name);
unsafe {
gl::UniformMatrix4fv(id, 1, gl::FALSE as u8, mem::transmute(m));
}
} | identifier_body |
tilemap.rs |
extern crate gl;
extern crate nalgebra;
use gl::types::*;
use nalgebra::na::{Mat4};
use nalgebra::na;
use std::mem;
use std::ptr;
use super::engine;
use super::shader;
use super::math;
//static CHUNK_SIZE : u8 = 10;
pub struct |
{
shader :shader::ShaderProgram,
vao : u32,
vbo_vertices : u32,
vbo_indices: u32,
vbo_tileid : u32,
indices_count : u32
//save model matrix
//hold ref/owned to TilemapChunkData logical part?
// tile_texture_atlas
// tile_texture_atlas_normal? <- normal map for tiles?
}
impl TilemapChunk
{
pub fn new() -> TilemapChunk
{
TilemapChunk {
shader: shader::ShaderProgram::new(),
vao: 0,
vbo_vertices: 0,
vbo_indices: 0,
indices_count: 0,
vbo_tileid: 0, //vbo for uv(texture) coordinates?
}
}
// tile_count_x: how many tiles on the x axis
// tile_count_y: how many tiles on the y axis
pub fn setup(&mut self, tile_count_x: u32, tile_count_y: u32)
{
//create dummy tile layout
let mut tilemap_chunk_vertices : Vec<GLfloat> = Vec::new();
let mut tilemap_chunk_indices : Vec<GLuint> = Vec::new();
//create the grid vertices
//create tile plane vertices
for i in range(0u32, (tile_count_x+1)*(tile_count_y+1))
{
let x = i % (tile_count_x+1); //first this counts up (column)
let y = i / (tile_count_x+1); //then this counts up (row)
tilemap_chunk_vertices.push(0.0+x as f32);
tilemap_chunk_vertices.push(0.0+y as f32);
//println!("vertex[{}]: {}, {}", i, x, y);
//calculate indices for the triangles
//indices are related to vertex indices not the vector index
//where each vertex has 2 entries
if x < tile_count_x
&& y < tile_count_y
{
let index_of = |x :u32, y:u32| x + (y * (tile_count_x+1));
//requires 2 triangles per tile (quad)
tilemap_chunk_indices.push(i); //index of (x,y)
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x, y+1));
//println!("\ttriangle_one: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
tilemap_chunk_indices.push(index_of(x, y+1));
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x+1, y+1));
//println!("\ttriangle_two: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
}
}
self.indices_count = tilemap_chunk_indices.len() as u32;
//println!("tilemap::setup() Count of vertices: {}", tilemap_chunk_vertices.len()/2); //x,y so /2
//println!("tilemap::setup() Count of indices: {}", self.indices_count);
//println!("tilemap::setup() vertices: {}", tilemap_chunk_vertices);
//TODO shader config elsewhere?
self.shader.add_shader_file("./data/client/shader/tilemap.vs.glsl", gl::VERTEX_SHADER);
self.shader.add_shader_file("./data/client/shader/tilemap.fs.glsl", gl::FRAGMENT_SHADER);
self.shader.set_fragment_name("fragColor"); //required before linking
self.shader.link_program();
self.shader.use_program();
unsafe
{
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut self.vao);
gl::BindVertexArray(self.vao);
// Create a Vertex Buffer Object and copy the vertex data to it
gl::GenBuffers(1, &mut self.vbo_vertices);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_vertices);
gl::BufferData(gl::ARRAY_BUFFER,
(tilemap_chunk_vertices.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
tilemap_chunk_vertices.as_ptr() as *const GLvoid,
gl::STATIC_DRAW);
// Specify the layout of the vertex data
let vertex_attr = self.shader.get_attrib("my_vertex");
gl::EnableVertexAttribArray(vertex_attr as GLuint);
gl::VertexAttribPointer(vertex_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
//vertex indices
gl::GenBuffers(1, &mut self.vbo_indices);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.vbo_indices);
gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, (tilemap_chunk_indices.len() * mem::size_of::<GLuint>()) as GLsizeiptr,
tilemap_chunk_indices.as_ptr() as *const GLvoid, gl::STATIC_DRAW);
//bind uniform
//disable all?:
//glBindVertexArray(0);
//glDisableVertexAttribArray
//gl::BindBuffer(*, 0)? for both?
}
}
/*
fn set_program_variable_vbo(&self, name: &str)
{
//in
}
*/
//move to shader?
fn set_program_uniform_mat4(&self, name: &str, m: &Mat4<f32>)
{
//self.shader
let id = self.shader.get_uniform(name);
unsafe {
gl::UniformMatrix4fv(id, 1, gl::FALSE as u8, mem::transmute(m));
}
}
}
impl engine::Drawable for TilemapChunk
{
fn draw(&self, rc: &engine::RenderContext)
{
//use shader
self.shader.use_program();
let mut model : Mat4<f32> = na::zero();
math::set_identity(&mut model);
//set uniform
//let mvp = /*rc.projm **/ rc.view;
let mvp = rc.projm * rc.view * model;
self.set_program_uniform_mat4("mvp", &mvp);
//bind vao
gl::BindVertexArray(self.vao);
//render
//gl::DrawArrays(gl::TRIANGLES, 0, self.indices_count as i32);
//with indices DrawElements must be used
unsafe {
gl::DrawElements(gl::TRIANGLE_STRIP, self.indices_count as i32, gl::UNSIGNED_INT, ptr::null());
}
//GL_TRIANGLE_STRIP?
//disable all
}
}
| TilemapChunk | identifier_name |
main.rs | use std::collections::HashSet;
fn main() {
struct PentNums {
n: usize,
curr: usize
}
impl PentNums {
fn new(index: usize) -> PentNums {
if index == 0 |
PentNums{n: index, curr: PentNums::get(index)}
}
fn get(index: usize) -> usize {
(index * ((index * 3) - 1)) / 2
}
}
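// Added sanity note (not in the original source): `get` is the closed form
// P(n) = n(3n - 1) / 2 for pentagonal numbers, so P(1..=5) = 1, 5, 12, 22, 35;
// for example P(5) = 5 * (3*5 - 1) / 2 = 35.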
impl Iterator for PentNums {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.n += 1;
self.curr = (self.n * ((self.n * 3) - 1)) / 2;
Some(self.curr)
}
}
fn is_pent(num: usize, mut h: &mut HashSet<usize>) -> bool {
if h.contains(&num) {
return true
}
let mut p = PentNums::new(h.len());
for elem in p {
if num == elem {
h.insert(num);
return true;
} else if elem > num {
return false;
}
}
false
}
// assumes a and b are pentagonal
fn sum_diff_pent(a: usize, b: usize, mut h: &mut HashSet<usize>) -> bool {
if a >= b {
return false;
}
let e = a + b;
// println!("{:?}", e);
if !is_pent(e, &mut h) {
return false;
}
let d = b - a;
// println!("{:?}", d);
if !is_pent(d, &mut h) {
return false;
}
true
}
let mut pA = PentNums::new(8)
.take(2500)
.collect::<Vec<_>>();
let mut h: HashSet<usize> = HashSet::new();
for num in pA.clone() {
h.insert(num);
}
'outer: for curr in pA {
// println!("{:?}", curr);
let mut pB = PentNums::new(4)
.take(2500)
.collect::<Vec<_>>();
for elem in pB {
// println!("{:?}", elem);
if elem >= curr {
continue 'outer;
}
if sum_diff_pent(elem, curr, &mut h) {
println!("{:?}", curr - elem);
break 'outer;
}
}
}
}
| {
return PentNums{n: index, curr: 0};
} | conditional_block |
main.rs | use std::collections::HashSet;
fn main() {
struct PentNums {
n: usize,
curr: usize
}
impl PentNums {
fn new(index: usize) -> PentNums {
if index == 0 {
return PentNums{n: index, curr: 0};
}
PentNums{n: index, curr: PentNums::get(index)}
}
fn get(index: usize) -> usize {
(index * ((index * 3) - 1)) / 2
}
}
impl Iterator for PentNums {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.n += 1;
self.curr = (self.n * ((self.n * 3) - 1)) / 2;
Some(self.curr)
}
}
fn is_pent(num: usize, mut h: &mut HashSet<usize>) -> bool {
if h.contains(&num) {
return true
}
let mut p = PentNums::new(h.len());
for elem in p {
if num == elem {
h.insert(num);
return true;
} else if elem > num {
return false;
}
}
false
}
// assumes a and b are pentagonal
fn | (a: usize, b: usize, mut h: &mut HashSet<usize>) -> bool {
if a >= b {
return false;
}
let e = a + b;
// println!("{:?}", e);
if !is_pent(e, &mut h) {
return false;
}
let d = b - a;
// println!("{:?}", d);
if !is_pent(d, &mut h) {
return false;
}
true
}
let mut pA = PentNums::new(8)
.take(2500)
.collect::<Vec<_>>();
let mut h: HashSet<usize> = HashSet::new();
for num in pA.clone() {
h.insert(num);
}
'outer: for curr in pA {
// println!("{:?}", curr);
let mut pB = PentNums::new(4)
.take(2500)
.collect::<Vec<_>>();
for elem in pB {
// println!("{:?}", elem);
if elem >= curr {
continue 'outer;
}
if sum_diff_pent(elem, curr, &mut h) {
println!("{:?}", curr - elem);
break 'outer;
}
}
}
}
| sum_diff_pent | identifier_name |
main.rs | use std::collections::HashSet;
fn main() {
struct PentNums {
n: usize,
curr: usize
}
impl PentNums {
fn new(index: usize) -> PentNums {
if index == 0 {
return PentNums{n: index, curr: 0};
}
PentNums{n: index, curr: PentNums::get(index)}
}
fn get(index: usize) -> usize {
(index * ((index * 3) - 1)) / 2
}
}
impl Iterator for PentNums {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.n += 1;
self.curr = (self.n * ((self.n * 3) - 1)) / 2;
Some(self.curr)
}
}
fn is_pent(num: usize, mut h: &mut HashSet<usize>) -> bool |
// assumes a and b are pentagonal
fn sum_diff_pent(a: usize, b: usize, mut h: &mut HashSet<usize>) -> bool {
if a >= b {
return false;
}
let e = a + b;
// println!("{:?}", e);
if !is_pent(e, &mut h) {
return false;
}
let d = b - a;
// println!("{:?}", d);
if !is_pent(d, &mut h) {
return false;
}
true
}
let mut pA = PentNums::new(8)
.take(2500)
.collect::<Vec<_>>();
let mut h: HashSet<usize> = HashSet::new();
for num in pA.clone() {
h.insert(num);
}
'outer: for curr in pA {
// println!("{:?}", curr);
let mut pB = PentNums::new(4)
.take(2500)
.collect::<Vec<_>>();
for elem in pB {
// println!("{:?}", elem);
if elem >= curr {
continue 'outer;
}
if sum_diff_pent(elem, curr, &mut h) {
println!("{:?}", curr - elem);
break 'outer;
}
}
}
}
| {
if h.contains(&num) {
return true
}
let mut p = PentNums::new(h.len());
for elem in p {
if num == elem {
h.insert(num);
return true;
} else if elem > num {
return false;
}
}
false
} | identifier_body |
main.rs | use std::collections::HashSet;
fn main() {
struct PentNums {
n: usize,
curr: usize
}
impl PentNums {
fn new(index: usize) -> PentNums {
if index == 0 {
return PentNums{n: index, curr: 0};
}
PentNums{n: index, curr: PentNums::get(index)}
}
fn get(index: usize) -> usize {
(index * ((index * 3) - 1)) / 2
}
}
impl Iterator for PentNums {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.n += 1;
self.curr = (self.n * ((self.n * 3) - 1)) / 2;
Some(self.curr)
}
}
fn is_pent(num: usize, mut h: &mut HashSet<usize>) -> bool {
if h.contains(&num) {
return true
}
let mut p = PentNums::new(h.len());
for elem in p {
if num == elem {
h.insert(num);
return true;
} else if elem > num {
return false;
}
}
false
}
// assumes a and b are pentagonal
fn sum_diff_pent(a: usize, b: usize, mut h: &mut HashSet<usize>) -> bool {
if a >= b {
return false;
}
let e = a + b;
// println!("{:?}", e);
if !is_pent(e, &mut h) {
return false;
}
let d = b - a;
// println!("{:?}", d);
if !is_pent(d, &mut h) {
return false;
}
true
}
let mut pA = PentNums::new(8)
.take(2500)
.collect::<Vec<_>>();
let mut h: HashSet<usize> = HashSet::new();
for num in pA.clone() { | }
'outer: for curr in pA {
// println!("{:?}", curr);
let mut pB = PentNums::new(4)
.take(2500)
.collect::<Vec<_>>();
for elem in pB {
// println!("{:?}", elem);
if elem >= curr {
continue 'outer;
}
if sum_diff_pent(elem, curr, &mut h) {
println!("{:?}", curr - elem);
break 'outer;
}
}
}
} | h.insert(num); | random_line_split |
builder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper module to build up a selector safely and efficiently.
//!
//! Our selector representation is designed to optimize matching, and has
//! several requirements:
//! * All simple selectors and combinators are stored inline in the same buffer
//! as Component instances.
//! * We store the top-level compound selectors from right to left, i.e. in
//! matching order.
//! * We store the simple selectors for each combinator from left to right, so
//! that we match the cheaper simple selectors first.
//!
//! Meeting all these constraints without extra memmove traffic during parsing
//! is non-trivial. This module encapsulates those details and presents an
//! easy-to-use API for the parser.
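// Added illustration (not in the original source): for a selector such as
// `div > p.foo` the parser calls push_simple_selector(LocalName(div)),
// push_combinator(Child), push_simple_selector(LocalName(p)),
// push_simple_selector(Class(foo)); build() then yields
// [LocalName(p), Class(foo), Combinator(Child), LocalName(div)], i.e.
// compound selectors right-to-left, simple selectors within each compound
// left-to-right.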
use parser::{Combinator, Component, SelectorImpl};
use servo_arc::{Arc, HeaderWithLength, ThinArc};
use sink::Push;
use smallvec::{self, SmallVec};
use std::cmp;
use std::iter;
use std::ops::{AddAssign, Add};
use std::ptr;
use std::slice;
/// Top-level SelectorBuilder struct. This should be stack-allocated by the
/// consumer and never moved (because it contains a lot of inline data that
/// would be slow to memmove).
///
/// After instantiation, callers may call the push_simple_selector() and
/// push_combinator() methods to append selector data as it is encountered
/// (from left to right). Once the process is complete, callers should invoke
/// build(), which transforms the contents of the SelectorBuilder into a heap-
/// allocated Selector and leaves the builder in a drained state.
#[derive(Debug)]
pub struct SelectorBuilder<Impl: SelectorImpl> {
/// The entire sequence of simple selectors, from left to right, without combinators.
///
/// We make this large because the result of parsing a selector is fed into a new
/// Arc-ed allocation, so any spilled vec would be a wasted allocation. Also,
/// Components are large enough that we don't have much cache locality benefit
/// from reserving stack space for fewer of them.
simple_selectors: SmallVec<[Component<Impl>; 32]>,
/// The combinators, and the length of the compound selector to their left.
combinators: SmallVec<[(Combinator, usize); 16]>,
/// The length of the current compound selector.
current_len: usize,
}
impl<Impl: SelectorImpl> Default for SelectorBuilder<Impl> {
#[inline(always)]
fn default() -> Self {
SelectorBuilder {
simple_selectors: SmallVec::new(),
combinators: SmallVec::new(),
current_len: 0,
}
}
}
impl<Impl: SelectorImpl> Push<Component<Impl>> for SelectorBuilder<Impl> {
fn push(&mut self, value: Component<Impl>) {
self.push_simple_selector(value);
}
}
impl<Impl: SelectorImpl> SelectorBuilder<Impl> {
/// Pushes a simple selector onto the current compound selector.
#[inline(always)]
pub fn push_simple_selector(&mut self, ss: Component<Impl>) {
debug_assert!(!ss.is_combinator());
self.simple_selectors.push(ss);
self.current_len += 1;
}
/// Completes the current compound selector and starts a new one, delimited
/// by the given combinator.
#[inline(always)]
pub fn push_combinator(&mut self, c: Combinator) {
self.combinators.push((c, self.current_len));
self.current_len = 0;
}
/// Returns true if no simple selectors have ever been pushed to this builder.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.simple_selectors.is_empty()
}
/// Returns true if combinators have ever been pushed to this builder.
#[inline(always)]
pub fn has_combinators(&self) -> bool {
!self.combinators.is_empty()
}
/// Consumes the builder, producing a Selector.
#[inline(always)]
pub fn build(
&mut self,
parsed_pseudo: bool,
parsed_slotted: bool,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// Compute the specificity and flags.
let mut spec = SpecificityAndFlags(specificity(&*self, self.simple_selectors.iter()));
if parsed_pseudo {
spec.0 |= HAS_PSEUDO_BIT;
}
if parsed_slotted {
spec.0 |= HAS_SLOTTED_BIT;
}
self.build_with_specificity_and_flags(spec)
}
/// Builds with an explicit SpecificityAndFlags. This is separated from build() so
/// that unit tests can pass an explicit specificity.
#[inline(always)]
pub fn build_with_specificity_and_flags(
&mut self,
spec: SpecificityAndFlags,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// First, compute the total number of Components we'll need to allocate
// space for.
let full_len = self.simple_selectors.len() + self.combinators.len();
// Create the header.
let header = HeaderWithLength::new(spec, full_len);
// Create the Arc using an iterator that drains our buffers.
// Use a raw pointer to be able to call set_len despite "borrowing" the slice.
// This is similar to SmallVec::drain, but we use a slice here because
// we’re gonna traverse it non-linearly.
let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors;
unsafe {
// Panic-safety: if SelectorBuilderIter is not iterated to the end,
// some simple selectors will safely leak.
self.simple_selectors.set_len(0)
}
let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len);
let iter = SelectorBuilderIter {
current_simple_selectors: current.iter(),
rest_of_simple_selectors: rest,
combinators: self.combinators.drain().rev(),
};
Arc::into_thin(Arc::from_header_and_iter(header, iter))
}
}
struct SelectorBuilderIter<'a, Impl: SelectorImpl> {
current_simple_selectors: slice::Iter<'a, Component<Impl>>,
rest_of_simple_selectors: &'a [Component<Impl>],
combinators: iter::Rev<smallvec::Drain<'a, (Combinator, usize)>>,
}
impl<'a, Impl: SelectorImpl> ExactSizeIterator for SelectorBuilderIter<'a, Impl> {
fn len(&self) -> usize {
self.current_simple_selectors.len() + self.rest_of_simple_selectors.len() +
self.combinators.len()
}
}
impl<'a, Impl: SelectorImpl> Iterator for SelectorBuilderIter<'a, Impl> {
type Item = Component<Impl>;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
if let Some(simple_selector_ref) = self.current_simple_selectors.next() {
// Move a simple selector out of this slice iterator.
// This is safe because we’ve called SmallVec::set_len(0) above,
// so SmallVec::drop won’t drop this simple selector.
unsafe { Some(ptr::read(simple_selector_ref)) }
} else {
self.combinators.next().map(|(combinator, len)| {
let (rest, current) = split_from_end(self.rest_of_simple_selectors, len);
self.rest_of_simple_selectors = rest;
self.current_simple_selectors = current.iter();
Component::Combinator(combinator)
})
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
fn split_from_end<T>(s: &[T], at: usize) -> (&[T], &[T]) {
s.split_at(s.len() - at)
}
pub const HAS_PSEUDO_BIT: u32 = 1 << 30;
pub const HAS_SLOTTED_BIT: u32 = 1 << 31;
/// We use ten bits for each specificity kind (id, class, element), and the two
/// high bits for the pseudo and slotted flags.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct SpecificityAndFlags(pub u32);
impl SpecificityAndFlags {
#[inline]
pub fn specificity(&self) -> u32 {
self.0 & !(HAS_PSEUDO_BIT | HAS_SLOTTED_BIT)
}
#[inline]
pub fn has_pseudo_element(&self) -> bool {
(self.0 & HAS_PSEUDO_BIT) != 0
}
#[inline]
pub fn is_slotted(&self) -> bool {
(self.0 & HAS_SLOTTED_BIT) != 0
}
}
const MAX_10BIT: u32 = (1u32 << 10) - 1;
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
struct Specificity {
id_selectors: u32,
class_like_selectors: u32,
element_selectors: u32,
}
impl AddAssign for Specificity {
#[inline]
fn add_assign(&mut self, rhs: Self) {
self.id_selectors += rhs.id_selectors;
self.class_like_selectors += rhs.class_like_selectors;
self.element_selectors += rhs.element_selectors;
}
}
impl Add for Specificity {
type Output = Specificity;
fn add(self, rhs: Specificity) -> Specificity {
Specificity {
id_selectors: self.id_selectors + rhs.id_selectors,
class_like_selectors: self.class_like_selectors + rhs.class_like_selectors,
element_selectors: self.element_selectors + rhs.element_selectors,
}
}
}
impl Default for Specificity {
fn default() -> Specificity {
Specificity {
id_selectors: 0,
class_like_selectors: 0,
element_selectors: 0,
}
}
}
impl From<u32> for Specificity {
#[inline]
fn from(value: u32) -> Specificity {
assert!(value <= MAX_10BIT << 20 | MAX_10BIT << 10 | MAX_10BIT);
Specificity {
id_selectors: value >> 20,
class_like_selectors: (value >> 10) & MAX_10BIT,
element_selectors: value & MAX_10BIT,
}
}
}
impl From<Specificity> for u32 {
#[inline]
fn from(specificity: Specificity) -> u32 {
cmp::min(specificity.id_selectors, MAX_10BIT) << 20 |
cmp::min(specificity.class_like_selectors, MAX_10BIT) << 10 |
cmp::min(specificity.element_selectors, MAX_10BIT)
}
}
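// Worked example of the packing above (added note): one id, one class-like and
// one element selector, e.g. `#a .b c`, encodes as
// (1 << 20) | (1 << 10) | 1 = 0x100401, with each count saturated at
// MAX_10BIT (1023) before shifting.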
fn specificity<Impl>(builder: &SelectorBuilder<Impl>, iter: slice::Iter<Component<Impl>>) -> u32
where
Impl: SelectorImpl,
{
complex_selector_specificity(builder, iter).into()
}
fn complex_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
mut iter: slice::Iter<Component<Impl>>,
) -> Specificity
where
Impl: SelectorImpl,
{
fn simple_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
simple_selector: &Component<Impl>,
specificity: &mut Specificity,
) where
Impl: SelectorImpl,
{
match *simple_selector {
Component::Combinator(ref combinator) => { | combinator,
builder,
);
}
Component::PseudoElement(..) | Component::LocalName(..) => {
specificity.element_selectors += 1
},
Component::Slotted(ref selector) => {
specificity.element_selectors += 1;
// Note that due to the way ::slotted works we only compete with
// other ::slotted rules, so the above rule doesn't really
// matter, but we do it still for consistency with other
// pseudo-elements.
//
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
},
Component::Host(ref selector) => {
specificity.class_like_selectors += 1;
if let Some(ref selector) = *selector {
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
}
}
Component::ID(..) => {
specificity.id_selectors += 1;
},
Component::Class(..) |
Component::AttributeInNoNamespace { .. } |
Component::AttributeInNoNamespaceExists { .. } |
Component::AttributeOther(..) |
Component::FirstChild |
Component::LastChild |
Component::OnlyChild |
Component::Root |
Component::Empty |
Component::Scope |
Component::NthChild(..) |
Component::NthLastChild(..) |
Component::NthOfType(..) |
Component::NthLastOfType(..) |
Component::FirstOfType |
Component::LastOfType |
Component::OnlyOfType |
Component::NonTSPseudoClass(..) => {
specificity.class_like_selectors += 1;
},
Component::ExplicitUniversalType |
Component::ExplicitAnyNamespace |
Component::ExplicitNoNamespace |
Component::DefaultNamespace(..) |
Component::Namespace(..) => {
// Does not affect specificity
},
Component::Negation(ref negated) => {
for ss in negated.iter() {
simple_selector_specificity(builder, &ss, specificity);
}
},
}
}
let mut specificity = Default::default();
for simple_selector in &mut iter {
simple_selector_specificity(builder, &simple_selector, &mut specificity);
}
specificity
} | unreachable!(
"Found combinator {:?} in simple selectors vector? {:?}", | random_line_split |
builder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper module to build up a selector safely and efficiently.
//!
//! Our selector representation is designed to optimize matching, and has
//! several requirements:
//! * All simple selectors and combinators are stored inline in the same buffer
//! as Component instances.
//! * We store the top-level compound selectors from right to left, i.e. in
//! matching order.
//! * We store the simple selectors for each combinator from left to right, so
//! that we match the cheaper simple selectors first.
//!
//! Meeting all these constraints without extra memmove traffic during parsing
//! is non-trivial. This module encapsulates those details and presents an
//! easy-to-use API for the parser.
use parser::{Combinator, Component, SelectorImpl};
use servo_arc::{Arc, HeaderWithLength, ThinArc};
use sink::Push;
use smallvec::{self, SmallVec};
use std::cmp;
use std::iter;
use std::ops::{AddAssign, Add};
use std::ptr;
use std::slice;
/// Top-level SelectorBuilder struct. This should be stack-allocated by the
/// consumer and never moved (because it contains a lot of inline data that
/// would be slow to memmove).
///
/// After instantiation, callers may call the push_simple_selector() and
/// push_combinator() methods to append selector data as it is encountered
/// (from left to right). Once the process is complete, callers should invoke
/// build(), which transforms the contents of the SelectorBuilder into a heap-
/// allocated Selector and leaves the builder in a drained state.
#[derive(Debug)]
pub struct SelectorBuilder<Impl: SelectorImpl> {
/// The entire sequence of simple selectors, from left to right, without combinators.
///
/// We make this large because the result of parsing a selector is fed into a new
/// Arc-ed allocation, so any spilled vec would be a wasted allocation. Also,
/// Components are large enough that we don't have much cache locality benefit
/// from reserving stack space for fewer of them.
simple_selectors: SmallVec<[Component<Impl>; 32]>,
/// The combinators, and the length of the compound selector to their left.
combinators: SmallVec<[(Combinator, usize); 16]>,
/// The length of the current compound selector.
current_len: usize,
}
impl<Impl: SelectorImpl> Default for SelectorBuilder<Impl> {
#[inline(always)]
fn default() -> Self {
SelectorBuilder {
simple_selectors: SmallVec::new(),
combinators: SmallVec::new(),
current_len: 0,
}
}
}
impl<Impl: SelectorImpl> Push<Component<Impl>> for SelectorBuilder<Impl> {
fn push(&mut self, value: Component<Impl>) {
self.push_simple_selector(value);
}
}
impl<Impl: SelectorImpl> SelectorBuilder<Impl> {
/// Pushes a simple selector onto the current compound selector.
#[inline(always)]
pub fn push_simple_selector(&mut self, ss: Component<Impl>) {
debug_assert!(!ss.is_combinator());
self.simple_selectors.push(ss);
self.current_len += 1;
}
/// Completes the current compound selector and starts a new one, delimited
/// by the given combinator.
#[inline(always)]
pub fn push_combinator(&mut self, c: Combinator) {
self.combinators.push((c, self.current_len));
self.current_len = 0;
}
/// Returns true if no simple selectors have ever been pushed to this builder.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.simple_selectors.is_empty()
}
/// Returns true if combinators have ever been pushed to this builder.
#[inline(always)]
pub fn has_combinators(&self) -> bool {
!self.combinators.is_empty()
}
/// Consumes the builder, producing a Selector.
#[inline(always)]
pub fn build(
&mut self,
parsed_pseudo: bool,
parsed_slotted: bool,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// Compute the specificity and flags.
let mut spec = SpecificityAndFlags(specificity(&*self, self.simple_selectors.iter()));
if parsed_pseudo {
spec.0 |= HAS_PSEUDO_BIT;
}
if parsed_slotted {
spec.0 |= HAS_SLOTTED_BIT;
}
self.build_with_specificity_and_flags(spec)
}
/// Builds with an explicit SpecificityAndFlags. This is separated from build() so
/// that unit tests can pass an explicit specificity.
#[inline(always)]
pub fn build_with_specificity_and_flags(
&mut self,
spec: SpecificityAndFlags,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// First, compute the total number of Components we'll need to allocate
// space for.
let full_len = self.simple_selectors.len() + self.combinators.len();
// Create the header.
let header = HeaderWithLength::new(spec, full_len);
// Create the Arc using an iterator that drains our buffers.
// Use a raw pointer to be able to call set_len despite "borrowing" the slice.
// This is similar to SmallVec::drain, but we use a slice here because
// we’re gonna traverse it non-linearly.
let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors;
unsafe {
// Panic-safety: if SelectorBuilderIter is not iterated to the end,
// some simple selectors will safely leak.
self.simple_selectors.set_len(0)
}
let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len);
let iter = SelectorBuilderIter {
current_simple_selectors: current.iter(),
rest_of_simple_selectors: rest,
combinators: self.combinators.drain().rev(),
};
Arc::into_thin(Arc::from_header_and_iter(header, iter))
}
}
struct SelectorBuilderIter<'a, Impl: SelectorImpl> {
current_simple_selectors: slice::Iter<'a, Component<Impl>>,
rest_of_simple_selectors: &'a [Component<Impl>],
combinators: iter::Rev<smallvec::Drain<'a, (Combinator, usize)>>,
}
impl<'a, Impl: SelectorImpl> ExactSizeIterator for SelectorBuilderIter<'a, Impl> {
fn le | self) -> usize {
self.current_simple_selectors.len() + self.rest_of_simple_selectors.len() +
self.combinators.len()
}
}
impl<'a, Impl: SelectorImpl> Iterator for SelectorBuilderIter<'a, Impl> {
type Item = Component<Impl>;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
if let Some(simple_selector_ref) = self.current_simple_selectors.next() {
// Move a simple selector out of this slice iterator.
// This is safe because we’ve called SmallVec::set_len(0) above,
// so SmallVec::drop won’t drop this simple selector.
unsafe { Some(ptr::read(simple_selector_ref)) }
} else {
self.combinators.next().map(|(combinator, len)| {
let (rest, current) = split_from_end(self.rest_of_simple_selectors, len);
self.rest_of_simple_selectors = rest;
self.current_simple_selectors = current.iter();
Component::Combinator(combinator)
})
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
fn split_from_end<T>(s: &[T], at: usize) -> (&[T], &[T]) {
s.split_at(s.len() - at)
}
pub const HAS_PSEUDO_BIT: u32 = 1 << 30;
pub const HAS_SLOTTED_BIT: u32 = 1 << 31;
/// We use ten bits for each specificity kind (id, class, element), and the two
/// high bits for the pseudo and slotted flags.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct SpecificityAndFlags(pub u32);
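// Illustrative worked example (not from the original source): a selector with
// one id selector, two class-like selectors and one element selector packs its
// specificity as (1 << 20) | (2 << 10) | 1 == 0x0010_0801, and HAS_PSEUDO_BIT /
// HAS_SLOTTED_BIT may additionally be OR-ed into bits 30 and 31.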
impl SpecificityAndFlags {
#[inline]
pub fn specificity(&self) -> u32 {
        self.0 & !(HAS_PSEUDO_BIT | HAS_SLOTTED_BIT)
}
#[inline]
pub fn has_pseudo_element(&self) -> bool {
        (self.0 & HAS_PSEUDO_BIT) != 0
}
#[inline]
pub fn is_slotted(&self) -> bool {
        (self.0 & HAS_SLOTTED_BIT) != 0
}
}
const MAX_10BIT: u32 = (1u32 << 10) - 1;
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
struct Specificity {
id_selectors: u32,
class_like_selectors: u32,
element_selectors: u32,
}
impl AddAssign for Specificity {
#[inline]
fn add_assign(&mut self, rhs: Self) {
self.id_selectors += rhs.id_selectors;
self.class_like_selectors += rhs.class_like_selectors;
self.element_selectors += rhs.element_selectors;
}
}
impl Add for Specificity {
type Output = Specificity;
fn add(self, rhs: Specificity) -> Specificity {
Specificity {
id_selectors: self.id_selectors + rhs.id_selectors,
class_like_selectors: self.class_like_selectors + rhs.class_like_selectors,
element_selectors: self.element_selectors + rhs.element_selectors,
}
}
}
impl Default for Specificity {
fn default() -> Specificity {
Specificity {
id_selectors: 0,
class_like_selectors: 0,
element_selectors: 0,
}
}
}
impl From<u32> for Specificity {
#[inline]
fn from(value: u32) -> Specificity {
assert!(value <= MAX_10BIT << 20 | MAX_10BIT << 10 | MAX_10BIT);
Specificity {
id_selectors: value >> 20,
class_like_selectors: (value >> 10) & MAX_10BIT,
element_selectors: value & MAX_10BIT,
}
}
}
impl From<Specificity> for u32 {
#[inline]
fn from(specificity: Specificity) -> u32 {
cmp::min(specificity.id_selectors, MAX_10BIT) << 20 |
cmp::min(specificity.class_like_selectors, MAX_10BIT) << 10 |
cmp::min(specificity.element_selectors, MAX_10BIT)
}
}
fn specificity<Impl>(builder: &SelectorBuilder<Impl>, iter: slice::Iter<Component<Impl>>) -> u32
where
Impl: SelectorImpl,
{
complex_selector_specificity(builder, iter).into()
}
fn complex_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
mut iter: slice::Iter<Component<Impl>>,
) -> Specificity
where
Impl: SelectorImpl,
{
fn simple_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
simple_selector: &Component<Impl>,
specificity: &mut Specificity,
) where
Impl: SelectorImpl,
{
match *simple_selector {
Component::Combinator(ref combinator) => {
unreachable!(
"Found combinator {:?} in simple selectors vector? {:?}",
combinator,
builder,
);
}
Component::PseudoElement(..) | Component::LocalName(..) => {
specificity.element_selectors += 1
},
Component::Slotted(ref selector) => {
specificity.element_selectors += 1;
// Note that due to the way ::slotted works we only compete with
// other ::slotted rules, so the above rule doesn't really
// matter, but we do it still for consistency with other
// pseudo-elements.
//
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
},
Component::Host(ref selector) => {
specificity.class_like_selectors += 1;
if let Some(ref selector) = *selector {
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
}
}
Component::ID(..) => {
specificity.id_selectors += 1;
},
Component::Class(..) |
            Component::AttributeInNoNamespace { .. } |
            Component::AttributeInNoNamespaceExists { .. } |
Component::AttributeOther(..) |
Component::FirstChild |
Component::LastChild |
Component::OnlyChild |
Component::Root |
Component::Empty |
Component::Scope |
Component::NthChild(..) |
Component::NthLastChild(..) |
Component::NthOfType(..) |
Component::NthLastOfType(..) |
Component::FirstOfType |
Component::LastOfType |
Component::OnlyOfType |
Component::NonTSPseudoClass(..) => {
specificity.class_like_selectors += 1;
},
Component::ExplicitUniversalType |
Component::ExplicitAnyNamespace |
Component::ExplicitNoNamespace |
Component::DefaultNamespace(..) |
Component::Namespace(..) => {
// Does not affect specificity
},
Component::Negation(ref negated) => {
for ss in negated.iter() {
simple_selector_specificity(builder, &ss, specificity);
}
},
}
}
let mut specificity = Default::default();
for simple_selector in &mut iter {
simple_selector_specificity(builder, &simple_selector, &mut specificity);
}
specificity
}
| n(& | identifier_name |
builder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper module to build up a selector safely and efficiently.
//!
//! Our selector representation is designed to optimize matching, and has
//! several requirements:
//! * All simple selectors and combinators are stored inline in the same buffer
//! as Component instances.
//! * We store the top-level compound selectors from right to left, i.e. in
//! matching order.
//! * We store the simple selectors for each combinator from left to right, so
//! that we match the cheaper simple selectors first.
//!
//! Meeting all these constraints without extra memmove traffic during parsing
//! is non-trivial. This module encapsulates those details and presents an
//! easy-to-use API for the parser.
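// Illustrative usage sketch (not from the original source): a parser building
// the selector `div > .foo` would drive this API roughly as follows, where
// `Impl` and the component payloads are placeholders:
//
//     let mut builder = SelectorBuilder::<Impl>::default();
//     builder.push_simple_selector(Component::LocalName(..)); // `div`
//     builder.push_combinator(Combinator::Child);             // `>`
//     builder.push_simple_selector(Component::Class(..));     // `.foo`
//     let selector = builder.build(false, false);
//
// build() then packs the simple selectors and combinators into one ThinArc,
// with compound selectors laid out in matching (right-to-left) order.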
use parser::{Combinator, Component, SelectorImpl};
use servo_arc::{Arc, HeaderWithLength, ThinArc};
use sink::Push;
use smallvec::{self, SmallVec};
use std::cmp;
use std::iter;
use std::ops::{AddAssign, Add};
use std::ptr;
use std::slice;
/// Top-level SelectorBuilder struct. This should be stack-allocated by the
/// consumer and never moved (because it contains a lot of inline data that
/// would be slow to memmove).
///
/// After instantiation, callers may call the push_simple_selector() and
/// push_combinator() methods to append selector data as it is encountered
/// (from left to right). Once the process is complete, callers should invoke
/// build(), which transforms the contents of the SelectorBuilder into a heap-
/// allocated Selector and leaves the builder in a drained state.
#[derive(Debug)]
pub struct SelectorBuilder<Impl: SelectorImpl> {
/// The entire sequence of simple selectors, from left to right, without combinators.
///
/// We make this large because the result of parsing a selector is fed into a new
/// Arc-ed allocation, so any spilled vec would be a wasted allocation. Also,
/// Components are large enough that we don't have much cache locality benefit
/// from reserving stack space for fewer of them.
simple_selectors: SmallVec<[Component<Impl>; 32]>,
/// The combinators, and the length of the compound selector to their left.
combinators: SmallVec<[(Combinator, usize); 16]>,
    /// The length of the current compound selector.
current_len: usize,
}
impl<Impl: SelectorImpl> Default for SelectorBuilder<Impl> {
#[inline(always)]
fn default() -> Self {
SelectorBuilder {
simple_selectors: SmallVec::new(),
combinators: SmallVec::new(),
current_len: 0,
}
}
}
impl<Impl: SelectorImpl> Push<Component<Impl>> for SelectorBuilder<Impl> {
fn push(&mut self, value: Component<Impl>) {
self.push_simple_selector(value);
}
}
impl<Impl: SelectorImpl> SelectorBuilder<Impl> {
/// Pushes a simple selector onto the current compound selector.
#[inline(always)]
pub fn push_simple_selector(&mut self, ss: Component<Impl>) {
debug_assert!(!ss.is_combinator());
self.simple_selectors.push(ss);
self.current_len += 1;
}
/// Completes the current compound selector and starts a new one, delimited
/// by the given combinator.
#[inline(always)]
pub fn push_combinator(&mut self, c: Combinator) {
self.combinators.push((c, self.current_len));
self.current_len = 0;
}
/// Returns true if no simple selectors have ever been pushed to this builder.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.simple_selectors.is_empty()
}
/// Returns true if combinators have ever been pushed to this builder.
#[inline(always)]
pub fn has_combinators(&self) -> bool {
!self.combinators.is_empty()
}
/// Consumes the builder, producing a Selector.
#[inline(always)]
pub fn build(
&mut self,
parsed_pseudo: bool,
parsed_slotted: bool,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// Compute the specificity and flags.
let mut spec = SpecificityAndFlags(specificity(&*self, self.simple_selectors.iter()));
if parsed_pseudo {
spec.0 |= HAS_PSEUDO_BIT;
}
if parsed_slotted |
self.build_with_specificity_and_flags(spec)
}
/// Builds with an explicit SpecificityAndFlags. This is separated from build() so
/// that unit tests can pass an explicit specificity.
#[inline(always)]
pub fn build_with_specificity_and_flags(
&mut self,
spec: SpecificityAndFlags,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// First, compute the total number of Components we'll need to allocate
// space for.
let full_len = self.simple_selectors.len() + self.combinators.len();
// Create the header.
let header = HeaderWithLength::new(spec, full_len);
// Create the Arc using an iterator that drains our buffers.
// Use a raw pointer to be able to call set_len despite "borrowing" the slice.
// This is similar to SmallVec::drain, but we use a slice here because
// we’re gonna traverse it non-linearly.
let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors;
unsafe {
// Panic-safety: if SelectorBuilderIter is not iterated to the end,
// some simple selectors will safely leak.
self.simple_selectors.set_len(0)
}
let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len);
let iter = SelectorBuilderIter {
current_simple_selectors: current.iter(),
rest_of_simple_selectors: rest,
combinators: self.combinators.drain().rev(),
};
Arc::into_thin(Arc::from_header_and_iter(header, iter))
}
}
struct SelectorBuilderIter<'a, Impl: SelectorImpl> {
current_simple_selectors: slice::Iter<'a, Component<Impl>>,
rest_of_simple_selectors: &'a [Component<Impl>],
combinators: iter::Rev<smallvec::Drain<'a, (Combinator, usize)>>,
}
impl<'a, Impl: SelectorImpl> ExactSizeIterator for SelectorBuilderIter<'a, Impl> {
fn len(&self) -> usize {
self.current_simple_selectors.len() + self.rest_of_simple_selectors.len() +
self.combinators.len()
}
}
impl<'a, Impl: SelectorImpl> Iterator for SelectorBuilderIter<'a, Impl> {
type Item = Component<Impl>;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
if let Some(simple_selector_ref) = self.current_simple_selectors.next() {
// Move a simple selector out of this slice iterator.
// This is safe because we’ve called SmallVec::set_len(0) above,
// so SmallVec::drop won’t drop this simple selector.
unsafe { Some(ptr::read(simple_selector_ref)) }
} else {
self.combinators.next().map(|(combinator, len)| {
let (rest, current) = split_from_end(self.rest_of_simple_selectors, len);
self.rest_of_simple_selectors = rest;
self.current_simple_selectors = current.iter();
Component::Combinator(combinator)
})
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
fn split_from_end<T>(s: &[T], at: usize) -> (&[T], &[T]) {
s.split_at(s.len() - at)
}
pub const HAS_PSEUDO_BIT: u32 = 1 << 30;
pub const HAS_SLOTTED_BIT: u32 = 1 << 31;
/// We use ten bits for each specificity kind (id, class, element), and the two
/// high bits for the pseudo and slotted flags.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct SpecificityAndFlags(pub u32);
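// Illustrative worked example (not from the original source): a selector with
// one id selector, two class-like selectors and one element selector packs its
// specificity as (1 << 20) | (2 << 10) | 1 == 0x0010_0801, and HAS_PSEUDO_BIT /
// HAS_SLOTTED_BIT may additionally be OR-ed into bits 30 and 31.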
impl SpecificityAndFlags {
#[inline]
pub fn specificity(&self) -> u32 {
        self.0 & !(HAS_PSEUDO_BIT | HAS_SLOTTED_BIT)
}
#[inline]
pub fn has_pseudo_element(&self) -> bool {
        (self.0 & HAS_PSEUDO_BIT) != 0
}
#[inline]
pub fn is_slotted(&self) -> bool {
        (self.0 & HAS_SLOTTED_BIT) != 0
}
}
const MAX_10BIT: u32 = (1u32 << 10) - 1;
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
struct Specificity {
id_selectors: u32,
class_like_selectors: u32,
element_selectors: u32,
}
impl AddAssign for Specificity {
#[inline]
fn add_assign(&mut self, rhs: Self) {
self.id_selectors += rhs.id_selectors;
self.class_like_selectors += rhs.class_like_selectors;
self.element_selectors += rhs.element_selectors;
}
}
impl Add for Specificity {
type Output = Specificity;
fn add(self, rhs: Specificity) -> Specificity {
Specificity {
id_selectors: self.id_selectors + rhs.id_selectors,
class_like_selectors: self.class_like_selectors + rhs.class_like_selectors,
element_selectors: self.element_selectors + rhs.element_selectors,
}
}
}
impl Default for Specificity {
fn default() -> Specificity {
Specificity {
id_selectors: 0,
class_like_selectors: 0,
element_selectors: 0,
}
}
}
impl From<u32> for Specificity {
#[inline]
fn from(value: u32) -> Specificity {
assert!(value <= MAX_10BIT << 20 | MAX_10BIT << 10 | MAX_10BIT);
Specificity {
id_selectors: value >> 20,
class_like_selectors: (value >> 10) & MAX_10BIT,
element_selectors: value & MAX_10BIT,
}
}
}
impl From<Specificity> for u32 {
#[inline]
fn from(specificity: Specificity) -> u32 {
cmp::min(specificity.id_selectors, MAX_10BIT) << 20 |
cmp::min(specificity.class_like_selectors, MAX_10BIT) << 10 |
cmp::min(specificity.element_selectors, MAX_10BIT)
}
}
fn specificity<Impl>(builder: &SelectorBuilder<Impl>, iter: slice::Iter<Component<Impl>>) -> u32
where
Impl: SelectorImpl,
{
complex_selector_specificity(builder, iter).into()
}
fn complex_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
mut iter: slice::Iter<Component<Impl>>,
) -> Specificity
where
Impl: SelectorImpl,
{
fn simple_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
simple_selector: &Component<Impl>,
specificity: &mut Specificity,
) where
Impl: SelectorImpl,
{
match *simple_selector {
Component::Combinator(ref combinator) => {
unreachable!(
"Found combinator {:?} in simple selectors vector? {:?}",
combinator,
builder,
);
}
Component::PseudoElement(..) | Component::LocalName(..) => {
specificity.element_selectors += 1
},
Component::Slotted(ref selector) => {
specificity.element_selectors += 1;
// Note that due to the way ::slotted works we only compete with
// other ::slotted rules, so the above rule doesn't really
// matter, but we do it still for consistency with other
// pseudo-elements.
//
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
},
Component::Host(ref selector) => {
specificity.class_like_selectors += 1;
if let Some(ref selector) = *selector {
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
}
}
Component::ID(..) => {
specificity.id_selectors += 1;
},
Component::Class(..) |
            Component::AttributeInNoNamespace { .. } |
            Component::AttributeInNoNamespaceExists { .. } |
Component::AttributeOther(..) |
Component::FirstChild |
Component::LastChild |
Component::OnlyChild |
Component::Root |
Component::Empty |
Component::Scope |
Component::NthChild(..) |
Component::NthLastChild(..) |
Component::NthOfType(..) |
Component::NthLastOfType(..) |
Component::FirstOfType |
Component::LastOfType |
Component::OnlyOfType |
Component::NonTSPseudoClass(..) => {
specificity.class_like_selectors += 1;
},
Component::ExplicitUniversalType |
Component::ExplicitAnyNamespace |
Component::ExplicitNoNamespace |
Component::DefaultNamespace(..) |
Component::Namespace(..) => {
// Does not affect specificity
},
Component::Negation(ref negated) => {
for ss in negated.iter() {
simple_selector_specificity(builder, &ss, specificity);
}
},
}
}
let mut specificity = Default::default();
for simple_selector in &mut iter {
simple_selector_specificity(builder, &simple_selector, &mut specificity);
}
specificity
}
| {
spec.0 |= HAS_SLOTTED_BIT;
} | conditional_block |
builder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper module to build up a selector safely and efficiently.
//!
//! Our selector representation is designed to optimize matching, and has
//! several requirements:
//! * All simple selectors and combinators are stored inline in the same buffer
//! as Component instances.
//! * We store the top-level compound selectors from right to left, i.e. in
//! matching order.
//! * We store the simple selectors for each combinator from left to right, so
//! that we match the cheaper simple selectors first.
//!
//! Meeting all these constraints without extra memmove traffic during parsing
//! is non-trivial. This module encapsulates those details and presents an
//! easy-to-use API for the parser.
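// Illustrative usage sketch (not from the original source): a parser building
// the selector `div > .foo` would drive this API roughly as follows, where
// `Impl` and the component payloads are placeholders:
//
//     let mut builder = SelectorBuilder::<Impl>::default();
//     builder.push_simple_selector(Component::LocalName(..)); // `div`
//     builder.push_combinator(Combinator::Child);             // `>`
//     builder.push_simple_selector(Component::Class(..));     // `.foo`
//     let selector = builder.build(false, false);
//
// build() then packs the simple selectors and combinators into one ThinArc,
// with compound selectors laid out in matching (right-to-left) order.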
use parser::{Combinator, Component, SelectorImpl};
use servo_arc::{Arc, HeaderWithLength, ThinArc};
use sink::Push;
use smallvec::{self, SmallVec};
use std::cmp;
use std::iter;
use std::ops::{AddAssign, Add};
use std::ptr;
use std::slice;
/// Top-level SelectorBuilder struct. This should be stack-allocated by the
/// consumer and never moved (because it contains a lot of inline data that
/// would be slow to memmove).
///
/// After instantiation, callers may call the push_simple_selector() and
/// push_combinator() methods to append selector data as it is encountered
/// (from left to right). Once the process is complete, callers should invoke
/// build(), which transforms the contents of the SelectorBuilder into a heap-
/// allocated Selector and leaves the builder in a drained state.
#[derive(Debug)]
pub struct SelectorBuilder<Impl: SelectorImpl> {
/// The entire sequence of simple selectors, from left to right, without combinators.
///
/// We make this large because the result of parsing a selector is fed into a new
/// Arc-ed allocation, so any spilled vec would be a wasted allocation. Also,
/// Components are large enough that we don't have much cache locality benefit
/// from reserving stack space for fewer of them.
simple_selectors: SmallVec<[Component<Impl>; 32]>,
/// The combinators, and the length of the compound selector to their left.
combinators: SmallVec<[(Combinator, usize); 16]>,
    /// The length of the current compound selector.
current_len: usize,
}
impl<Impl: SelectorImpl> Default for SelectorBuilder<Impl> {
#[inline(always)]
fn default() -> Self {
SelectorBuilder {
simple_selectors: SmallVec::new(),
combinators: SmallVec::new(),
current_len: 0,
}
}
}
impl<Impl: SelectorImpl> Push<Component<Impl>> for SelectorBuilder<Impl> {
fn push(&mut self, value: Component<Impl>) {
self.push_simple_selector(value);
}
}
impl<Impl: SelectorImpl> SelectorBuilder<Impl> {
/// Pushes a simple selector onto the current compound selector.
#[inline(always)]
pub fn push_simple_selector(&mut self, ss: Component<Impl>) {
debug_assert!(!ss.is_combinator());
self.simple_selectors.push(ss);
self.current_len += 1;
}
/// Completes the current compound selector and starts a new one, delimited
/// by the given combinator.
#[inline(always)]
pub fn push_combinator(&mut self, c: Combinator) {
self.combinators.push((c, self.current_len));
self.current_len = 0;
}
/// Returns true if no simple selectors have ever been pushed to this builder.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.simple_selectors.is_empty()
}
/// Returns true if combinators have ever been pushed to this builder.
#[inline(always)]
pub fn has_combinators(&self) -> bool {
!self.combinators.is_empty()
}
/// Consumes the builder, producing a Selector.
#[inline(always)]
pub fn build(
&mut self,
parsed_pseudo: bool,
parsed_slotted: bool,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// Compute the specificity and flags.
let mut spec = SpecificityAndFlags(specificity(&*self, self.simple_selectors.iter()));
if parsed_pseudo {
spec.0 |= HAS_PSEUDO_BIT;
}
if parsed_slotted {
spec.0 |= HAS_SLOTTED_BIT;
}
self.build_with_specificity_and_flags(spec)
}
/// Builds with an explicit SpecificityAndFlags. This is separated from build() so
/// that unit tests can pass an explicit specificity.
#[inline(always)]
pub fn build_with_specificity_and_flags(
&mut self,
spec: SpecificityAndFlags,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// First, compute the total number of Components we'll need to allocate
// space for.
let full_len = self.simple_selectors.len() + self.combinators.len();
// Create the header.
let header = HeaderWithLength::new(spec, full_len);
// Create the Arc using an iterator that drains our buffers.
// Use a raw pointer to be able to call set_len despite "borrowing" the slice.
// This is similar to SmallVec::drain, but we use a slice here because
// we’re gonna traverse it non-linearly.
let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors;
unsafe {
// Panic-safety: if SelectorBuilderIter is not iterated to the end,
// some simple selectors will safely leak.
self.simple_selectors.set_len(0)
}
let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len);
let iter = SelectorBuilderIter {
current_simple_selectors: current.iter(),
rest_of_simple_selectors: rest,
combinators: self.combinators.drain().rev(),
};
Arc::into_thin(Arc::from_header_and_iter(header, iter))
}
}
struct SelectorBuilderIter<'a, Impl: SelectorImpl> {
current_simple_selectors: slice::Iter<'a, Component<Impl>>,
rest_of_simple_selectors: &'a [Component<Impl>],
combinators: iter::Rev<smallvec::Drain<'a, (Combinator, usize)>>,
}
impl<'a, Impl: SelectorImpl> ExactSizeIterator for SelectorBuilderIter<'a, Impl> {
fn len(&self) -> usize {
self.current_simple_selectors.len() + self.rest_of_simple_selectors.len() +
self.combinators.len()
}
}
impl<'a, Impl: SelectorImpl> Iterator for SelectorBuilderIter<'a, Impl> {
type Item = Component<Impl>;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
if let Some(simple_selector_ref) = self.current_simple_selectors.next() {
// Move a simple selector out of this slice iterator.
// This is safe because we’ve called SmallVec::set_len(0) above,
// so SmallVec::drop won’t drop this simple selector.
unsafe { Some(ptr::read(simple_selector_ref)) }
} else {
self.combinators.next().map(|(combinator, len)| {
let (rest, current) = split_from_end(self.rest_of_simple_selectors, len);
self.rest_of_simple_selectors = rest;
self.current_simple_selectors = current.iter();
Component::Combinator(combinator)
})
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
fn split_from_end<T>(s: &[T], at: usize) -> (&[T], &[T]) {
s.split_at(s.len() - at)
}
pub const HAS_PSEUDO_BIT: u32 = 1 << 30;
pub const HAS_SLOTTED_BIT: u32 = 1 << 31;
/// We use ten bits for each specificity kind (id, class, element), and the two
/// high bits for the pseudo and slotted flags.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct SpecificityAndFlags(pub u32);
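// Illustrative worked example (not from the original source): a selector with
// one id selector, two class-like selectors and one element selector packs its
// specificity as (1 << 20) | (2 << 10) | 1 == 0x0010_0801, and HAS_PSEUDO_BIT /
// HAS_SLOTTED_BIT may additionally be OR-ed into bits 30 and 31.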
impl SpecificityAndFlags {
#[inline]
pub fn specificity(&self) -> u32 {
        self.0 & !(HAS_PSEUDO_BIT | HAS_SLOTTED_BIT)
}
#[inline]
pub fn has_pseudo_element(&self) -> bool {
| #[inline]
pub fn is_slotted(&self) -> bool {
        (self.0 & HAS_SLOTTED_BIT) != 0
}
}
const MAX_10BIT: u32 = (1u32 << 10) - 1;
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
struct Specificity {
id_selectors: u32,
class_like_selectors: u32,
element_selectors: u32,
}
impl AddAssign for Specificity {
#[inline]
fn add_assign(&mut self, rhs: Self) {
self.id_selectors += rhs.id_selectors;
self.class_like_selectors += rhs.class_like_selectors;
self.element_selectors += rhs.element_selectors;
}
}
impl Add for Specificity {
type Output = Specificity;
fn add(self, rhs: Specificity) -> Specificity {
Specificity {
id_selectors: self.id_selectors + rhs.id_selectors,
class_like_selectors: self.class_like_selectors + rhs.class_like_selectors,
element_selectors: self.element_selectors + rhs.element_selectors,
}
}
}
impl Default for Specificity {
fn default() -> Specificity {
Specificity {
id_selectors: 0,
class_like_selectors: 0,
element_selectors: 0,
}
}
}
impl From<u32> for Specificity {
#[inline]
fn from(value: u32) -> Specificity {
assert!(value <= MAX_10BIT << 20 | MAX_10BIT << 10 | MAX_10BIT);
Specificity {
id_selectors: value >> 20,
class_like_selectors: (value >> 10) & MAX_10BIT,
element_selectors: value & MAX_10BIT,
}
}
}
impl From<Specificity> for u32 {
#[inline]
fn from(specificity: Specificity) -> u32 {
cmp::min(specificity.id_selectors, MAX_10BIT) << 20 |
cmp::min(specificity.class_like_selectors, MAX_10BIT) << 10 |
cmp::min(specificity.element_selectors, MAX_10BIT)
}
}
fn specificity<Impl>(builder: &SelectorBuilder<Impl>, iter: slice::Iter<Component<Impl>>) -> u32
where
Impl: SelectorImpl,
{
complex_selector_specificity(builder, iter).into()
}
fn complex_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
mut iter: slice::Iter<Component<Impl>>,
) -> Specificity
where
Impl: SelectorImpl,
{
fn simple_selector_specificity<Impl>(
builder: &SelectorBuilder<Impl>,
simple_selector: &Component<Impl>,
specificity: &mut Specificity,
) where
Impl: SelectorImpl,
{
match *simple_selector {
Component::Combinator(ref combinator) => {
unreachable!(
"Found combinator {:?} in simple selectors vector? {:?}",
combinator,
builder,
);
}
Component::PseudoElement(..) | Component::LocalName(..) => {
specificity.element_selectors += 1
},
Component::Slotted(ref selector) => {
specificity.element_selectors += 1;
// Note that due to the way ::slotted works we only compete with
// other ::slotted rules, so the above rule doesn't really
// matter, but we do it still for consistency with other
// pseudo-elements.
//
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
},
Component::Host(ref selector) => {
specificity.class_like_selectors += 1;
if let Some(ref selector) = *selector {
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
}
}
Component::ID(..) => {
specificity.id_selectors += 1;
},
Component::Class(..) |
            Component::AttributeInNoNamespace { .. } |
            Component::AttributeInNoNamespaceExists { .. } |
Component::AttributeOther(..) |
Component::FirstChild |
Component::LastChild |
Component::OnlyChild |
Component::Root |
Component::Empty |
Component::Scope |
Component::NthChild(..) |
Component::NthLastChild(..) |
Component::NthOfType(..) |
Component::NthLastOfType(..) |
Component::FirstOfType |
Component::LastOfType |
Component::OnlyOfType |
Component::NonTSPseudoClass(..) => {
specificity.class_like_selectors += 1;
},
Component::ExplicitUniversalType |
Component::ExplicitAnyNamespace |
Component::ExplicitNoNamespace |
Component::DefaultNamespace(..) |
Component::Namespace(..) => {
// Does not affect specificity
},
Component::Negation(ref negated) => {
for ss in negated.iter() {
simple_selector_specificity(builder, &ss, specificity);
}
},
}
}
let mut specificity = Default::default();
for simple_selector in &mut iter {
simple_selector_specificity(builder, &simple_selector, &mut specificity);
}
specificity
}
| (self.0 & HAS_PSEUDO_BIT) != 0
}
| identifier_body |
vec-dst.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn sub_expr() {
// Test for a &[T] => &&[T] coercion in sub-expression position
    // (surprisingly, this can cause errors which are not caused by either of:
// `let x = vec.slice_mut(0, 2);`
// `foo(vec.slice_mut(0, 2));` ).
let mut vec: Vec<int> = vec!(1, 2, 3, 4);
let b: &mut [int] = [1, 2];
assert!(vec.slice_mut(0, 2) == b);
}
fn index() {
    // Tests for indexing into box/& [T, ..n]
    let x: [int, ..3] = [1, 2, 3];
    let mut x: Box<[int, ..3]> = box x;
assert!(x[0] == 1);
assert!(x[1] == 2);
assert!(x[2] == 3);
x[1] = 45;
assert!(x[0] == 1);
assert!(x[1] == 45);
assert!(x[2] == 3);
    let mut x: [int, ..3] = [1, 2, 3];
    let x: &mut [int, ..3] = &mut x;
assert!(x[0] == 1);
assert!(x[1] == 2);
assert!(x[2] == 3);
x[1] = 45;
assert!(x[0] == 1);
assert!(x[1] == 45);
assert!(x[2] == 3);
}
pub fn | () {
sub_expr();
index();
}
| main | identifier_name |
vec-dst.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn sub_expr() {
// Test for a &[T] => &&[T] coercion in sub-expression position
    // (surprisingly, this can cause errors which are not caused by either of:
// `let x = vec.slice_mut(0, 2);`
// `foo(vec.slice_mut(0, 2));` ).
let mut vec: Vec<int> = vec!(1, 2, 3, 4);
let b: &mut [int] = [1, 2];
assert!(vec.slice_mut(0, 2) == b);
}
fn index() | assert!(x[2] == 3);
}
pub fn main() {
sub_expr();
index();
}
| {
// Tests for indexing into box/& [T, ..n]
let x: [int, ..3] = [1, 2, 3];
let mut x: Box<[int, ..3]> = box x;
assert!(x[0] == 1);
assert!(x[1] == 2);
assert!(x[2] == 3);
x[1] = 45;
assert!(x[0] == 1);
assert!(x[1] == 45);
assert!(x[2] == 3);
let mut x: [int, ..3] = [1, 2, 3];
let x: &mut [int, ..3] = &mut x;
assert!(x[0] == 1);
assert!(x[1] == 2);
assert!(x[2] == 3);
x[1] = 45;
assert!(x[0] == 1);
assert!(x[1] == 45); | identifier_body |
vec-dst.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | fn sub_expr() {
// Test for a &[T] => &&[T] coercion in sub-expression position
// (surpisingly, this can cause errors which are not caused by either of:
// `let x = vec.slice_mut(0, 2);`
// `foo(vec.slice_mut(0, 2));` ).
let mut vec: Vec<int> = vec!(1, 2, 3, 4);
let b: &mut [int] = [1, 2];
assert!(vec.slice_mut(0, 2) == b);
}
fn index() {
// Tests for indexing into box/& [T,..n]
let x: [int,..3] = [1, 2, 3];
let mut x: Box<[int,..3]> = box x;
assert!(x[0] == 1);
assert!(x[1] == 2);
assert!(x[2] == 3);
x[1] = 45;
assert!(x[0] == 1);
assert!(x[1] == 45);
assert!(x[2] == 3);
let mut x: [int,..3] = [1, 2, 3];
let x: &mut [int,..3] = &mut x;
assert!(x[0] == 1);
assert!(x[1] == 2);
assert!(x[2] == 3);
x[1] = 45;
assert!(x[0] == 1);
assert!(x[1] == 45);
assert!(x[2] == 3);
}
pub fn main() {
sub_expr();
index();
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| random_line_split |
expr-block.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
// Tests for standalone blocks as expressions
fn test_basic() { let rs: bool = { true }; assert!((rs)); }
struct RS { v1: isize, v2: isize }
fn test_rec() { let rs = { RS {v1: 10, v2: 20} }; assert_eq!(rs.v2, 20); }
fn test_filled_with_stuff() {
let rs = { let mut a = 0; while a < 10 { a += 1; } a };
assert_eq!(rs, 10);
}
pub fn main() { test_basic(); test_rec(); test_filled_with_stuff(); } | random_line_split |
|
expr-block.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
// Tests for standalone blocks as expressions
fn test_basic() |
struct RS { v1: isize, v2: isize }
fn test_rec() { let rs = { RS {v1: 10, v2: 20} }; assert_eq!(rs.v2, 20); }
fn test_filled_with_stuff() {
let rs = { let mut a = 0; while a < 10 { a += 1; } a };
assert_eq!(rs, 10);
}
pub fn main() { test_basic(); test_rec(); test_filled_with_stuff(); }
| { let rs: bool = { true }; assert!((rs)); } | identifier_body |
expr-block.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
// Tests for standalone blocks as expressions
fn test_basic() { let rs: bool = { true }; assert!((rs)); }
struct RS { v1: isize, v2: isize }
fn test_rec() { let rs = { RS {v1: 10, v2: 20} }; assert_eq!(rs.v2, 20); }
fn test_filled_with_stuff() {
let rs = { let mut a = 0; while a < 10 { a += 1; } a };
assert_eq!(rs, 10);
}
pub fn | () { test_basic(); test_rec(); test_filled_with_stuff(); }
| main | identifier_name |
crypter.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use byteorder::{BigEndian, ByteOrder};
use derive_more::Deref;
use engine_traits::EncryptionMethod as DBEncryptionMethod;
use kvproto::encryptionpb::EncryptionMethod;
use openssl::symm::{self, Cipher as OCipher};
use rand::{rngs::OsRng, RngCore};
use tikv_util::{box_err, impl_display_as_debug};
use crate::{Error, Result};
#[cfg(not(feature = "prost-codec"))]
pub fn encryption_method_to_db_encryption_method(method: EncryptionMethod) -> DBEncryptionMethod {
match method {
EncryptionMethod::Plaintext => DBEncryptionMethod::Plaintext,
EncryptionMethod::Aes128Ctr => DBEncryptionMethod::Aes128Ctr,
EncryptionMethod::Aes192Ctr => DBEncryptionMethod::Aes192Ctr,
EncryptionMethod::Aes256Ctr => DBEncryptionMethod::Aes256Ctr,
EncryptionMethod::Unknown => DBEncryptionMethod::Unknown,
}
}
pub fn encryption_method_from_db_encryption_method(method: DBEncryptionMethod) -> EncryptionMethod {
match method {
DBEncryptionMethod::Plaintext => EncryptionMethod::Plaintext,
DBEncryptionMethod::Aes128Ctr => EncryptionMethod::Aes128Ctr,
DBEncryptionMethod::Aes192Ctr => EncryptionMethod::Aes192Ctr,
DBEncryptionMethod::Aes256Ctr => EncryptionMethod::Aes256Ctr,
DBEncryptionMethod::Unknown => EncryptionMethod::Unknown,
}
}
#[cfg(not(feature = "prost-codec"))]
pub fn compat(method: EncryptionMethod) -> EncryptionMethod {
method
}
#[cfg(feature = "prost-codec")]
pub fn encryption_method_to_db_encryption_method(
method: i32, /* EncryptionMethod */
) -> DBEncryptionMethod {
match method {
1/* EncryptionMethod::Plaintext */ => DBEncryptionMethod::Plaintext,
2/* EncryptionMethod::Aes128Ctr */ => DBEncryptionMethod::Aes128Ctr,
3/* EncryptionMethod::Aes192Ctr */ => DBEncryptionMethod::Aes192Ctr,
4/* EncryptionMethod::Aes256Ctr */ => DBEncryptionMethod::Aes256Ctr,
_/* EncryptionMethod::Unknown */ => DBEncryptionMethod::Unknown,
}
}
#[cfg(feature = "prost-codec")]
pub fn compat(method: EncryptionMethod) -> i32 {
match method {
EncryptionMethod::Unknown => 0,
EncryptionMethod::Plaintext => 1,
EncryptionMethod::Aes128Ctr => 2,
EncryptionMethod::Aes192Ctr => 3,
EncryptionMethod::Aes256Ctr => 4,
}
}
pub fn get_method_key_length(method: EncryptionMethod) -> usize {
match method {
EncryptionMethod::Plaintext => 0,
EncryptionMethod::Aes128Ctr => 16,
EncryptionMethod::Aes192Ctr => 24,
EncryptionMethod::Aes256Ctr => 32,
unknown => panic!("bad EncryptionMethod {:?}", unknown),
}
}
// The IV length should be 12 bytes for GCM mode.
const GCM_IV_12: usize = 12;
// The IV length should be 16 bytes for CTR mode.
const CTR_IV_16: usize = 16;
#[derive(Debug, Clone, Copy)]
pub enum Iv {
Gcm([u8; GCM_IV_12]),
Ctr([u8; CTR_IV_16]),
}
impl Iv {
/// Generate a random IV for AES-GCM.
pub fn new_gcm() -> Iv {
let mut iv = [0u8; GCM_IV_12];
OsRng.fill_bytes(&mut iv);
Iv::Gcm(iv)
}
/// Generate a random IV for AES-CTR.
pub fn new_ctr() -> Iv {
let mut iv = [0u8; CTR_IV_16];
OsRng.fill_bytes(&mut iv);
Iv::Ctr(iv)
}
pub fn from_slice(src: &[u8]) -> Result<Iv> {
if src.len() == CTR_IV_16 {
let mut iv = [0; CTR_IV_16];
iv.copy_from_slice(src);
Ok(Iv::Ctr(iv))
} else if src.len() == GCM_IV_12 {
let mut iv = [0; GCM_IV_12];
iv.copy_from_slice(src);
Ok(Iv::Gcm(iv))
} else {
Err(box_err!(
"Nonce + Counter must be 12/16 bytes, {}",
src.len()
))
}
}
pub fn as_slice(&self) -> &[u8] {
match self {
Iv::Ctr(iv) => iv,
Iv::Gcm(iv) => iv,
}
}
pub fn add_offset(&mut self, offset: u64) -> Result<()> {
match self {
Iv::Ctr(iv) => {
let v = BigEndian::read_u128(iv);
BigEndian::write_u128(iv, v.wrapping_add(offset as u128));
Ok(())
}
Iv::Gcm(_) => Err(box_err!("offset addition is not supported for GCM mode")),
}
}
}
// The GCM tag length must be 16 bytes.
const GCM_TAG_LEN: usize = 16;
pub struct AesGcmTag([u8; GCM_TAG_LEN]);
impl<'a> From<&'a [u8]> for AesGcmTag {
fn from(src: &'a [u8]) -> AesGcmTag {
assert!(src.len() >= GCM_TAG_LEN, "AES GCM tag must be 16 bytes");
let mut tag = [0; GCM_TAG_LEN];
tag.copy_from_slice(src);
AesGcmTag(tag)
}
}
impl AesGcmTag {
pub fn as_slice(&self) -> &[u8] {
&self.0
}
}
/// An Aes256-GCM crypter.
pub struct AesGcmCrypter<'k> {
iv: Iv,
key: &'k PlainKey,
}
impl<'k> AesGcmCrypter<'k> {
/// The key length of `AesGcmCrypter` is 32 bytes.
pub const KEY_LEN: usize = 32;
pub fn new(key: &'k PlainKey, iv: Iv) -> AesGcmCrypter<'k> {
AesGcmCrypter { iv, key }
}
pub fn encrypt(&self, pt: &[u8]) -> Result<(Vec<u8>, AesGcmTag)> {
let cipher = OCipher::aes_256_gcm();
let mut tag = AesGcmTag([0u8; GCM_TAG_LEN]);
let ciphertext = symm::encrypt_aead(
cipher,
&self.key.0,
Some(self.iv.as_slice()),
&[], /* AAD */
pt,
&mut tag.0,
)?;
Ok((ciphertext, tag))
}
pub fn decrypt(&self, ct: &[u8], tag: AesGcmTag) -> Result<Vec<u8>> {
let cipher = OCipher::aes_256_gcm();
let plaintext = symm::decrypt_aead(
cipher,
&self.key.0,
Some(self.iv.as_slice()),
&[], /* AAD */
ct,
&tag.0,
)?;
Ok(plaintext)
}
}
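// Rough usage sketch (illustrative only; the tests below exercise the real
// round-trip against a NIST vector). `raw_key_bytes` and `plaintext` are
// placeholders for caller-provided data:
//
//     let key = PlainKey::new(raw_key_bytes)?;   // raw_key_bytes: Vec<u8>, 32 bytes
//     let iv = Iv::new_gcm();
//     let crypter = AesGcmCrypter::new(&key, iv);
//     let (ciphertext, tag) = crypter.encrypt(&plaintext)?;
//     let recovered = crypter.decrypt(&ciphertext, tag)?;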
pub fn verify_encryption_config(method: EncryptionMethod, key: &[u8]) -> Result<()> {
if method == EncryptionMethod::Unknown {
return Err(Error::UnknownEncryption);
}
    if method != EncryptionMethod::Plaintext {
let key_len = get_method_key_length(method);
        if key.len() != key_len {
return Err(box_err!(
"unexpected key length, expected {} vs actual {}",
key_len,
key.len()
));
}
}
Ok(())
}
// PlainKey is a newtype used to mark a vector as a plaintext key.
// It requires the vec to be a valid AesGcmCrypter key.
#[derive(Deref)]
pub struct PlainKey(Vec<u8>);
impl PlainKey {
pub fn new(key: Vec<u8>) -> Result<Self> {
        if key.len() != AesGcmCrypter::KEY_LEN {
return Err(box_err!(
"encryption method and key length mismatch, expect {} get {}",
AesGcmCrypter::KEY_LEN,
key.len()
));
}
Ok(Self(key))
}
}
// Don't expose the key in a debug print
impl std::fmt::Debug for PlainKey {
fn | (&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("PlainKey")
.field(&"REDACTED".to_string())
.finish()
}
}
// Don't expose the key in a display print
impl_display_as_debug!(PlainKey);
#[cfg(test)]
mod tests {
use hex::FromHex;
use super::*;
#[test]
fn test_iv() {
let mut ivs = Vec::with_capacity(100);
for c in 0..100 {
if c % 2 == 0 {
ivs.push(Iv::new_ctr());
} else {
ivs.push(Iv::new_gcm());
}
}
ivs.dedup_by(|a, b| a.as_slice() == b.as_slice());
assert_eq!(ivs.len(), 100);
for iv in ivs {
let iv1 = Iv::from_slice(iv.as_slice()).unwrap();
assert_eq!(iv.as_slice(), iv1.as_slice());
}
}
#[test]
fn test_ase_256_gcm() {
// See more http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
//
// [Keylen = 256]
// [IVlen = 96]
// [PTlen = 256]
// [AADlen = 0]
// [Taglen = 128]
//
// Count = 0
// Key = c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139
// IV = cafabd9672ca6c79a2fbdc22
// CT = 84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980
// AAD =
// Tag = 79651c875f7941793d42bbd0af1cce7c
// PT = 25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749
let pt = "25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749";
let ct = "84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980";
let key = "c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139";
let iv = "cafabd9672ca6c79a2fbdc22";
let tag = "79651c875f7941793d42bbd0af1cce7c";
let pt = Vec::from_hex(pt).unwrap();
let ct = Vec::from_hex(ct).unwrap();
let key = PlainKey::new(Vec::from_hex(key).unwrap()).unwrap();
let iv = Iv::from_slice(Vec::from_hex(iv).unwrap().as_slice()).unwrap();
let tag = Vec::from_hex(tag).unwrap();
let crypter = AesGcmCrypter::new(&key, iv);
let (ciphertext, gcm_tag) = crypter.encrypt(&pt).unwrap();
assert_eq!(ciphertext, ct, "{}", hex::encode(&ciphertext));
assert_eq!(gcm_tag.0.to_vec(), tag, "{}", hex::encode(&gcm_tag.0));
let plaintext = crypter.decrypt(&ct, gcm_tag).unwrap();
assert_eq!(plaintext, pt, "{}", hex::encode(&plaintext));
// Fail to decrypt with a wrong tag.
crypter
.decrypt(&ct, AesGcmTag([0u8; GCM_TAG_LEN]))
.unwrap_err();
}
}
| fmt | identifier_name |
crypter.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use byteorder::{BigEndian, ByteOrder};
use derive_more::Deref;
use engine_traits::EncryptionMethod as DBEncryptionMethod;
use kvproto::encryptionpb::EncryptionMethod;
use openssl::symm::{self, Cipher as OCipher};
use rand::{rngs::OsRng, RngCore};
use tikv_util::{box_err, impl_display_as_debug};
use crate::{Error, Result};
#[cfg(not(feature = "prost-codec"))]
pub fn encryption_method_to_db_encryption_method(method: EncryptionMethod) -> DBEncryptionMethod {
match method {
EncryptionMethod::Plaintext => DBEncryptionMethod::Plaintext,
EncryptionMethod::Aes128Ctr => DBEncryptionMethod::Aes128Ctr,
EncryptionMethod::Aes192Ctr => DBEncryptionMethod::Aes192Ctr,
EncryptionMethod::Aes256Ctr => DBEncryptionMethod::Aes256Ctr,
EncryptionMethod::Unknown => DBEncryptionMethod::Unknown,
}
}
pub fn encryption_method_from_db_encryption_method(method: DBEncryptionMethod) -> EncryptionMethod {
match method {
DBEncryptionMethod::Plaintext => EncryptionMethod::Plaintext,
DBEncryptionMethod::Aes128Ctr => EncryptionMethod::Aes128Ctr,
DBEncryptionMethod::Aes192Ctr => EncryptionMethod::Aes192Ctr,
DBEncryptionMethod::Aes256Ctr => EncryptionMethod::Aes256Ctr,
DBEncryptionMethod::Unknown => EncryptionMethod::Unknown,
}
}
#[cfg(not(feature = "prost-codec"))]
pub fn compat(method: EncryptionMethod) -> EncryptionMethod {
method
}
#[cfg(feature = "prost-codec")]
pub fn encryption_method_to_db_encryption_method(
method: i32, /* EncryptionMethod */
) -> DBEncryptionMethod {
match method {
1/* EncryptionMethod::Plaintext */ => DBEncryptionMethod::Plaintext,
2/* EncryptionMethod::Aes128Ctr */ => DBEncryptionMethod::Aes128Ctr,
3/* EncryptionMethod::Aes192Ctr */ => DBEncryptionMethod::Aes192Ctr,
4/* EncryptionMethod::Aes256Ctr */ => DBEncryptionMethod::Aes256Ctr,
_/* EncryptionMethod::Unknown */ => DBEncryptionMethod::Unknown,
}
}
#[cfg(feature = "prost-codec")]
pub fn compat(method: EncryptionMethod) -> i32 {
match method {
EncryptionMethod::Unknown => 0,
EncryptionMethod::Plaintext => 1,
EncryptionMethod::Aes128Ctr => 2,
EncryptionMethod::Aes192Ctr => 3,
EncryptionMethod::Aes256Ctr => 4,
}
}
pub fn get_method_key_length(method: EncryptionMethod) -> usize {
match method {
EncryptionMethod::Plaintext => 0,
EncryptionMethod::Aes128Ctr => 16,
EncryptionMethod::Aes192Ctr => 24,
EncryptionMethod::Aes256Ctr => 32,
unknown => panic!("bad EncryptionMethod {:?}", unknown),
}
}
// The IV length should be 12 bytes for GCM mode.
const GCM_IV_12: usize = 12;
// The IV length should be 16 bytes for CTR mode.
const CTR_IV_16: usize = 16;
#[derive(Debug, Clone, Copy)]
pub enum Iv {
Gcm([u8; GCM_IV_12]),
Ctr([u8; CTR_IV_16]),
}
impl Iv {
/// Generate a random IV for AES-GCM.
pub fn new_gcm() -> Iv {
let mut iv = [0u8; GCM_IV_12];
OsRng.fill_bytes(&mut iv);
Iv::Gcm(iv)
}
/// Generate a random IV for AES-CTR.
pub fn new_ctr() -> Iv {
let mut iv = [0u8; CTR_IV_16];
OsRng.fill_bytes(&mut iv);
Iv::Ctr(iv)
}
pub fn from_slice(src: &[u8]) -> Result<Iv> {
if src.len() == CTR_IV_16 {
let mut iv = [0; CTR_IV_16];
iv.copy_from_slice(src);
Ok(Iv::Ctr(iv))
} else if src.len() == GCM_IV_12 {
let mut iv = [0; GCM_IV_12];
iv.copy_from_slice(src);
Ok(Iv::Gcm(iv))
} else {
Err(box_err!(
"Nonce + Counter must be 12/16 bytes, {}",
src.len()
))
}
}
pub fn as_slice(&self) -> &[u8] {
match self {
Iv::Ctr(iv) => iv,
Iv::Gcm(iv) => iv,
}
}
pub fn add_offset(&mut self, offset: u64) -> Result<()> {
match self {
Iv::Ctr(iv) => {
let v = BigEndian::read_u128(iv);
BigEndian::write_u128(iv, v.wrapping_add(offset as u128));
Ok(())
}
Iv::Gcm(_) => Err(box_err!("offset addition is not supported for GCM mode")),
}
}
}
// The GCM tag length must be 16 bytes.
const GCM_TAG_LEN: usize = 16;
pub struct AesGcmTag([u8; GCM_TAG_LEN]);
impl<'a> From<&'a [u8]> for AesGcmTag {
fn from(src: &'a [u8]) -> AesGcmTag {
assert!(src.len() >= GCM_TAG_LEN, "AES GCM tag must be 16 bytes");
let mut tag = [0; GCM_TAG_LEN];
tag.copy_from_slice(src);
AesGcmTag(tag)
}
}
impl AesGcmTag {
pub fn as_slice(&self) -> &[u8] {
&self.0
}
}
/// An Aes256-GCM crypter.
pub struct AesGcmCrypter<'k> {
iv: Iv,
key: &'k PlainKey,
}
impl<'k> AesGcmCrypter<'k> {
/// The key length of `AesGcmCrypter` is 32 bytes.
pub const KEY_LEN: usize = 32;
pub fn new(key: &'k PlainKey, iv: Iv) -> AesGcmCrypter<'k> {
AesGcmCrypter { iv, key }
}
pub fn encrypt(&self, pt: &[u8]) -> Result<(Vec<u8>, AesGcmTag)> {
let cipher = OCipher::aes_256_gcm();
let mut tag = AesGcmTag([0u8; GCM_TAG_LEN]);
let ciphertext = symm::encrypt_aead(
cipher,
&self.key.0, | &mut tag.0,
)?;
Ok((ciphertext, tag))
}
pub fn decrypt(&self, ct: &[u8], tag: AesGcmTag) -> Result<Vec<u8>> {
let cipher = OCipher::aes_256_gcm();
let plaintext = symm::decrypt_aead(
cipher,
&self.key.0,
Some(self.iv.as_slice()),
&[], /* AAD */
ct,
&tag.0,
)?;
Ok(plaintext)
}
}
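// Rough usage sketch (illustrative only; the tests below exercise the real
// round-trip against a NIST vector). `raw_key_bytes` and `plaintext` are
// placeholders for caller-provided data:
//
//     let key = PlainKey::new(raw_key_bytes)?;   // raw_key_bytes: Vec<u8>, 32 bytes
//     let iv = Iv::new_gcm();
//     let crypter = AesGcmCrypter::new(&key, iv);
//     let (ciphertext, tag) = crypter.encrypt(&plaintext)?;
//     let recovered = crypter.decrypt(&ciphertext, tag)?;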
pub fn verify_encryption_config(method: EncryptionMethod, key: &[u8]) -> Result<()> {
if method == EncryptionMethod::Unknown {
return Err(Error::UnknownEncryption);
}
    if method != EncryptionMethod::Plaintext {
let key_len = get_method_key_length(method);
        if key.len() != key_len {
return Err(box_err!(
"unexpected key length, expected {} vs actual {}",
key_len,
key.len()
));
}
}
Ok(())
}
// PlainKey is a newtype used to mark a vector as a plaintext key.
// It requires the vec to be a valid AesGcmCrypter key.
#[derive(Deref)]
pub struct PlainKey(Vec<u8>);
impl PlainKey {
pub fn new(key: Vec<u8>) -> Result<Self> {
        if key.len() != AesGcmCrypter::KEY_LEN {
return Err(box_err!(
"encryption method and key length mismatch, expect {} get {}",
AesGcmCrypter::KEY_LEN,
key.len()
));
}
Ok(Self(key))
}
}
// Don't expose the key in a debug print
impl std::fmt::Debug for PlainKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("PlainKey")
.field(&"REDACTED".to_string())
.finish()
}
}
// Don't expose the key in a display print
impl_display_as_debug!(PlainKey);
#[cfg(test)]
mod tests {
use hex::FromHex;
use super::*;
#[test]
fn test_iv() {
let mut ivs = Vec::with_capacity(100);
for c in 0..100 {
if c % 2 == 0 {
ivs.push(Iv::new_ctr());
} else {
ivs.push(Iv::new_gcm());
}
}
ivs.dedup_by(|a, b| a.as_slice() == b.as_slice());
assert_eq!(ivs.len(), 100);
for iv in ivs {
let iv1 = Iv::from_slice(iv.as_slice()).unwrap();
assert_eq!(iv.as_slice(), iv1.as_slice());
}
}
#[test]
fn test_ase_256_gcm() {
// See more http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
//
// [Keylen = 256]
// [IVlen = 96]
// [PTlen = 256]
// [AADlen = 0]
// [Taglen = 128]
//
// Count = 0
// Key = c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139
// IV = cafabd9672ca6c79a2fbdc22
// CT = 84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980
// AAD =
// Tag = 79651c875f7941793d42bbd0af1cce7c
// PT = 25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749
let pt = "25431587e9ecffc7c37f8d6d52a9bc3310651d46fb0e3bad2726c8f2db653749";
let ct = "84e5f23f95648fa247cb28eef53abec947dbf05ac953734618111583840bd980";
let key = "c3d99825f2181f4808acd2068eac7441a65bd428f14d2aab43fefc0129091139";
let iv = "cafabd9672ca6c79a2fbdc22";
let tag = "79651c875f7941793d42bbd0af1cce7c";
let pt = Vec::from_hex(pt).unwrap();
let ct = Vec::from_hex(ct).unwrap();
let key = PlainKey::new(Vec::from_hex(key).unwrap()).unwrap();
let iv = Iv::from_slice(Vec::from_hex(iv).unwrap().as_slice()).unwrap();
let tag = Vec::from_hex(tag).unwrap();
let crypter = AesGcmCrypter::new(&key, iv);
let (ciphertext, gcm_tag) = crypter.encrypt(&pt).unwrap();
assert_eq!(ciphertext, ct, "{}", hex::encode(&ciphertext));
assert_eq!(gcm_tag.0.to_vec(), tag, "{}", hex::encode(&gcm_tag.0));
let plaintext = crypter.decrypt(&ct, gcm_tag).unwrap();
assert_eq!(plaintext, pt, "{}", hex::encode(&plaintext));
// Fail to decrypt with a wrong tag.
crypter
.decrypt(&ct, AesGcmTag([0u8; GCM_TAG_LEN]))
.unwrap_err();
}
} | Some(self.iv.as_slice()),
&[], /* AAD */
pt, | random_line_split |
regions-fn-subtyping.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(unused_assignments)]
// Issue #2263.
// pretty-expanded FIXME #23616
#![allow(unused_variables)]
// Should pass region checking.
fn | (f: Box<FnMut(&usize)>) {
// Here, g is a function that can accept a usize pointer with
// lifetime r, and f is a function that can accept a usize pointer
// with any lifetime. The assignment g = f should be OK (i.e.,
// f's type should be a subtype of g's type), because f can be
// used in any context that expects g's type. But this currently
// fails.
let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|x| { });
g = f;
}
// This version is the same as above, except that here, g's type is
// inferred.
fn ok_inferred(f: Box<FnMut(&usize)>) {
let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|_| {});
g = f;
}
pub fn main() {
}
| ok | identifier_name |
regions-fn-subtyping.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(unused_assignments)]
// Issue #2263.
// pretty-expanded FIXME #23616
#![allow(unused_variables)]
// Should pass region checking.
fn ok(f: Box<FnMut(&usize)>) {
// Here, g is a function that can accept a usize pointer with
// lifetime r, and f is a function that can accept a usize pointer
// with any lifetime. The assignment g = f should be OK (i.e.,
// f's type should be a subtype of g's type), because f can be
// used in any context that expects g's type. But this currently
// fails.
let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|x| { });
g = f;
}
// This version is the same as above, except that here, g's type is
// inferred.
fn ok_inferred(f: Box<FnMut(&usize)>) {
let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|_| {});
g = f;
}
pub fn main() | {
} | identifier_body |
|
regions-fn-subtyping.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(unused_assignments)]
// Issue #2263.
// pretty-expanded FIXME #23616
#![allow(unused_variables)]
// Should pass region checking.
fn ok(f: Box<FnMut(&usize)>) {
// Here, g is a function that can accept a usize pointer with
// lifetime r, and f is a function that can accept a usize pointer
// with any lifetime. The assignment g = f should be OK (i.e.,
// f's type should be a subtype of g's type), because f can be
// used in any context that expects g's type. But this currently
// fails.
let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|x| { });
g = f;
}
// This version is the same as above, except that here, g's type is
// inferred.
fn ok_inferred(f: Box<FnMut(&usize)>) {
let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|_| {});
g = f;
} | } |
pub fn main() { | random_line_split |
lib.rs | //! Asynchronous channels.
//!
//! This crate provides channels that can be used to communicate between
//! asynchronous tasks.
//!
//! All items of this library are only available when the `std` or `alloc` feature of this
//! library is activated, and it is activated by default.
#![cfg_attr(feature = "cfg-target-has-atomic", feature(cfg_target_has_atomic))]
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms, unreachable_pub)]
// It cannot be included in the published code because these lints have false positives in the minimum required version.
#![cfg_attr(test, warn(single_use_lifetimes))]
#![warn(clippy::all)]
#![doc(test(attr(deny(warnings), allow(dead_code, unused_assignments, unused_variables))))]
#![doc(html_root_url = "https://docs.rs/futures-channel/0.3.0")]
#[cfg(all(feature = "cfg-target-has-atomic", not(feature = "unstable")))]
compile_error!("The `cfg-target-has-atomic` feature requires the `unstable` feature as an explicit opt-in to unstable features");
| ($($item:item)*) => {$(
#[cfg_attr(feature = "cfg-target-has-atomic", cfg(target_has_atomic = "ptr"))]
$item
)*};
}
cfg_target_has_atomic! {
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
mod lock;
#[cfg(feature = "std")]
pub mod mpsc;
#[cfg(feature = "alloc")]
pub mod oneshot;
} | macro_rules! cfg_target_has_atomic { | random_line_split |
process-spawn-with-unicode-params.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
// The test copies itself into a subdirectory with a non-ASCII name and then
// runs it as a child process within the subdirectory. The parent process
// also adds an environment variable and an argument, both containing
// non-ASCII characters. The child process ensures all the strings are
// intact.
use std::old_io;
use std::old_io::fs;
use std::old_io::Command;
use std::os;
use std::old_path::Path;
fn main() {
let my_args = os::args();
let my_cwd = os::getcwd().unwrap();
let my_env = os::env();
let my_path = Path::new(os::self_exe_name().unwrap());
let my_dir = my_path.dir_path();
let my_ext = my_path.extension_str().unwrap_or("");
// some non-ASCII characters
let blah = "\u03c0\u042f\u97f3\u00e6\u221e";
let child_name = "child";
let child_dir = format!("process-spawn-with-unicode-params-{}", blah);
// parameters sent to child / expected to be received from parent
let arg = blah;
let cwd = my_dir.join(Path::new(child_dir.clone()));
let env = ("RUST_TEST_PROC_SPAWN_UNICODE".to_string(), blah.to_string());
// am I the parent or the child?
if my_args.len() == 1 { // parent
let child_filestem = Path::new(child_name);
let child_filename = child_filestem.with_extension(my_ext);
let child_path = cwd.join(child_filename);
// make a separate directory for the child
drop(fs::mkdir(&cwd, old_io::USER_RWX).is_ok()); | my_env.push(env);
// run child
let p = Command::new(&child_path)
.arg(arg)
.cwd(&cwd)
.env_set_all(&my_env)
.spawn().unwrap().wait_with_output().unwrap();
// display the output
assert!(old_io::stdout().write(&p.output).is_ok());
assert!(old_io::stderr().write(&p.error).is_ok());
// make sure the child succeeded
assert!(p.status.success());
} else { // child
// check working directory (don't try to compare with `cwd` here!)
assert!(my_cwd.ends_with_path(&Path::new(child_dir)));
// check arguments
assert_eq!(&*my_args[1], arg);
// check environment variable
assert!(my_env.contains(&env));
};
} | assert!(fs::copy(&my_path, &child_path).is_ok());
let mut my_env = my_env; | random_line_split |
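// A rough, hedged sketch of the same parent-side spawn pattern with today's
// std::process::Command instead of the removed std::old_io::Command. The directory
// name, argument, and guard logic are illustrative; only the env var name is taken
// from the test above.
use std::env;
use std::fs;
use std::process::Command;
fn spawn_child_sketch() -> std::io::Result<()> {
    let exe = env::current_exe()?;
    let work_dir = env::temp_dir().join("spawn-unicode-π");
    fs::create_dir_all(&work_dir)?;
    let output = Command::new(&exe)
        .arg("πЯ音æ∞")                                  // non-ASCII argument
        .current_dir(&work_dir)                         // non-ASCII working directory
        .env("RUST_TEST_PROC_SPAWN_UNICODE", "πЯ音æ∞")  // non-ASCII environment value
        .output()?;                                     // spawn, wait, capture output
    assert!(output.status.success());
    Ok(())
}
fn main() {
    // Only act as the parent when launched with no extra arguments,
    // so the re-spawned child falls through and exits cleanly.
    if env::args().len() == 1 {
        spawn_child_sketch().expect("spawning the child failed");
    }
}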
process-spawn-with-unicode-params.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
// The test copies itself into a subdirectory with a non-ASCII name and then
// runs it as a child process within the subdirectory. The parent process
// also adds an environment variable and an argument, both containing
// non-ASCII characters. The child process ensures all the strings are
// intact.
use std::old_io;
use std::old_io::fs;
use std::old_io::Command;
use std::os;
use std::old_path::Path;
fn main() {
let my_args = os::args();
let my_cwd = os::getcwd().unwrap();
let my_env = os::env();
let my_path = Path::new(os::self_exe_name().unwrap());
let my_dir = my_path.dir_path();
let my_ext = my_path.extension_str().unwrap_or("");
// some non-ASCII characters
let blah = "\u03c0\u042f\u97f3\u00e6\u221e";
let child_name = "child";
let child_dir = format!("process-spawn-with-unicode-params-{}", blah);
// parameters sent to child / expected to be received from parent
let arg = blah;
let cwd = my_dir.join(Path::new(child_dir.clone()));
let env = ("RUST_TEST_PROC_SPAWN_UNICODE".to_string(), blah.to_string());
// am I the parent or the child?
if my_args.len() == 1 | assert!(old_io::stdout().write(&p.output).is_ok());
assert!(old_io::stderr().write(&p.error).is_ok());
// make sure the child succeeded
assert!(p.status.success());
}
else { // child
// check working directory (don't try to compare with `cwd` here!)
assert!(my_cwd.ends_with_path(&Path::new(child_dir)));
// check arguments
assert_eq!(&*my_args[1], arg);
// check environment variable
assert!(my_env.contains(&env));
};
}
| { // parent
let child_filestem = Path::new(child_name);
let child_filename = child_filestem.with_extension(my_ext);
let child_path = cwd.join(child_filename);
// make a separate directory for the child
drop(fs::mkdir(&cwd, old_io::USER_RWX).is_ok());
assert!(fs::copy(&my_path, &child_path).is_ok());
let mut my_env = my_env;
my_env.push(env);
// run child
let p = Command::new(&child_path)
.arg(arg)
.cwd(&cwd)
.env_set_all(&my_env)
.spawn().unwrap().wait_with_output().unwrap();
// display the output | conditional_block |
process-spawn-with-unicode-params.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
// The test copies itself into a subdirectory with a non-ASCII name and then
// runs it as a child process within the subdirectory. The parent process
// also adds an environment variable and an argument, both containing
// non-ASCII characters. The child process ensures all the strings are
// intact.
use std::old_io;
use std::old_io::fs;
use std::old_io::Command;
use std::os;
use std::old_path::Path;
fn | () {
let my_args = os::args();
let my_cwd = os::getcwd().unwrap();
let my_env = os::env();
let my_path = Path::new(os::self_exe_name().unwrap());
let my_dir = my_path.dir_path();
let my_ext = my_path.extension_str().unwrap_or("");
// some non-ASCII characters
let blah = "\u03c0\u042f\u97f3\u00e6\u221e";
let child_name = "child";
let child_dir = format!("process-spawn-with-unicode-params-{}", blah);
// parameters sent to child / expected to be received from parent
let arg = blah;
let cwd = my_dir.join(Path::new(child_dir.clone()));
let env = ("RUST_TEST_PROC_SPAWN_UNICODE".to_string(), blah.to_string());
// am I the parent or the child?
if my_args.len() == 1 { // parent
let child_filestem = Path::new(child_name);
let child_filename = child_filestem.with_extension(my_ext);
let child_path = cwd.join(child_filename);
// make a separate directory for the child
drop(fs::mkdir(&cwd, old_io::USER_RWX).is_ok());
assert!(fs::copy(&my_path, &child_path).is_ok());
let mut my_env = my_env;
my_env.push(env);
// run child
let p = Command::new(&child_path)
.arg(arg)
.cwd(&cwd)
.env_set_all(&my_env)
.spawn().unwrap().wait_with_output().unwrap();
// display the output
assert!(old_io::stdout().write(&p.output).is_ok());
assert!(old_io::stderr().write(&p.error).is_ok());
// make sure the child succeeded
assert!(p.status.success());
} else { // child
// check working directory (don't try to compare with `cwd` here!)
assert!(my_cwd.ends_with_path(&Path::new(child_dir)));
// check arguments
assert_eq!(&*my_args[1], arg);
// check environment variable
assert!(my_env.contains(&env));
};
}
| main | identifier_name |
process-spawn-with-unicode-params.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
// The test copies itself into a subdirectory with a non-ASCII name and then
// runs it as a child process within the subdirectory. The parent process
// also adds an environment variable and an argument, both containing
// non-ASCII characters. The child process ensures all the strings are
// intact.
use std::old_io;
use std::old_io::fs;
use std::old_io::Command;
use std::os;
use std::old_path::Path;
fn main() | if my_args.len() == 1 { // parent
let child_filestem = Path::new(child_name);
let child_filename = child_filestem.with_extension(my_ext);
let child_path = cwd.join(child_filename);
// make a separate directory for the child
drop(fs::mkdir(&cwd, old_io::USER_RWX).is_ok());
assert!(fs::copy(&my_path, &child_path).is_ok());
let mut my_env = my_env;
my_env.push(env);
// run child
let p = Command::new(&child_path)
.arg(arg)
.cwd(&cwd)
.env_set_all(&my_env)
.spawn().unwrap().wait_with_output().unwrap();
// display the output
assert!(old_io::stdout().write(&p.output).is_ok());
assert!(old_io::stderr().write(&p.error).is_ok());
// make sure the child succeeded
assert!(p.status.success());
} else { // child
// check working directory (don't try to compare with `cwd` here!)
assert!(my_cwd.ends_with_path(&Path::new(child_dir)));
// check arguments
assert_eq!(&*my_args[1], arg);
// check environment variable
assert!(my_env.contains(&env));
};
}
| {
let my_args = os::args();
let my_cwd = os::getcwd().unwrap();
let my_env = os::env();
let my_path = Path::new(os::self_exe_name().unwrap());
let my_dir = my_path.dir_path();
let my_ext = my_path.extension_str().unwrap_or("");
// some non-ASCII characters
let blah = "\u03c0\u042f\u97f3\u00e6\u221e";
let child_name = "child";
let child_dir = format!("process-spawn-with-unicode-params-{}", blah);
// parameters sent to child / expected to be received from parent
let arg = blah;
let cwd = my_dir.join(Path::new(child_dir.clone()));
let env = ("RUST_TEST_PROC_SPAWN_UNICODE".to_string(), blah.to_string());
// am I the parent or the child? | identifier_body |
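// A brief, hedged sketch of the child-side half of that test with the current std::env
// API (std::os::args()/env() no longer exist). Each check only fires when the matching
// argument, variable, or directory is actually present, so the sketch also runs standalone.
use std::env;
fn main() {
    // First real argument (index 0 is the executable path).
    if let Some(arg) = env::args().nth(1) {
        assert_eq!(arg, "πЯ音æ∞");
    }
    // Environment variable round-trips as UTF-8.
    if let Ok(val) = env::var("RUST_TEST_PROC_SPAWN_UNICODE") {
        assert_eq!(val, "πЯ音æ∞");
    }
    // Working directory keeps its non-ASCII final component (name assumed for the sketch).
    if let Ok(cwd) = env::current_dir() {
        let _in_child_dir = cwd.ends_with("spawn-unicode-π");
    }
}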
section_table.rs | use crate::error::{self, Error};
use crate::pe::relocation;
use alloc::string::{String, ToString};
use scroll::{ctx, Pread, Pwrite};
#[repr(C)]
#[derive(Debug, PartialEq, Clone, Default)]
pub struct SectionTable {
pub name: [u8; 8],
pub real_name: Option<String>,
pub virtual_size: u32,
pub virtual_address: u32,
pub size_of_raw_data: u32,
pub pointer_to_raw_data: u32,
pub pointer_to_relocations: u32,
pub pointer_to_linenumbers: u32,
pub number_of_relocations: u16,
pub number_of_linenumbers: u16,
pub characteristics: u32,
}
pub const SIZEOF_SECTION_TABLE: usize = 8 * 5;
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L70
// Decodes a string table entry in base 64 (//AAAAAA). Expects string without
// prefixed slashes.
fn base64_decode_string_entry(s: &str) -> Result<usize, ()> {
assert!(s.len() <= 6, "String too long, possible overflow.");
let mut val = 0;
for c in s.bytes() {
let v = if b'A' <= c && c <= b'Z' {
// 00..=25
c - b'A'
} else if b'a' <= c && c <= b'z' {
// 26..=51
c - b'a' + 26
} else if b'0' <= c && c <= b'9' {
// 52..=61
c - b'0' + 52
} else if c == b'+' {
// 62
62
} else if c == b'/' {
// 63
63
} else {
return Err(());
};
val = val * 64 + v as usize;
}
Ok(val)
}
impl SectionTable {
pub fn parse(
bytes: &[u8],
offset: &mut usize,
string_table_offset: usize,
) -> error::Result<Self> {
let mut table = SectionTable::default();
let mut name = [0u8; 8];
name.copy_from_slice(bytes.gread_with(offset, 8)?);
table.name = name;
table.virtual_size = bytes.gread_with(offset, scroll::LE)?;
table.virtual_address = bytes.gread_with(offset, scroll::LE)?;
table.size_of_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_relocations = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.number_of_relocations = bytes.gread_with(offset, scroll::LE)?;
table.number_of_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.characteristics = bytes.gread_with(offset, scroll::LE)?;
if let Some(idx) = table.name_offset()? {
table.real_name = Some(bytes.pread::<&str>(string_table_offset + idx)?.to_string());
}
Ok(table)
}
pub fn name_offset(&self) -> error::Result<Option<usize>> {
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L1054
if self.name[0] == b'/' {
let idx: usize = if self.name[1] == b'/' {
let b64idx = self.name.pread::<&str>(2)?;
base64_decode_string_entry(b64idx).map_err(|_| {
Error::Malformed(format!(
"Invalid indirect section name //{}: base64 decoding failed",
b64idx
))
})?
} else {
let name = self.name.pread::<&str>(1)?;
name.parse().map_err(|err| {
Error::Malformed(format!("Invalid indirect section name /{}: {}", name, err))
})?
};
Ok(Some(idx))
} else {
Ok(None)
}
}
#[allow(clippy::useless_let_if_seq)]
pub fn set_name_offset(&mut self, mut idx: usize) -> error::Result<()> {
if idx <= 9_999_999 {
// 10^7 - 1
// write!(&mut self.name[1..], "{}", idx) without using io::Write.
// We write into a temporary since we calculate digits starting at the right.
let mut name = [0; 7];
let mut len = 0;
if idx == 0 {
name[6] = b'0';
len = 1;
} else {
while idx != 0 {
let rem = (idx % 10) as u8;
idx /= 10;
name[6 - len] = b'0' + rem;
len += 1;
}
}
self.name = [0; 8];
self.name[0] = b'/';
self.name[1..][..len].copy_from_slice(&name[7 - len..]);
Ok(())
} else if idx as u64 <= 0xfff_fff_fff {
// 64^6 - 1
self.name[0] = b'/';
self.name[1] = b'/';
for i in 0..6 {
let rem = (idx % 64) as u8;
idx /= 64;
let c = match rem {
0..=25 => b'A' + rem,
26..=51 => b'a' + rem - 26,
52..=61 => b'0' + rem - 52,
62 => b'+',
63 => b'/',
_ => unreachable!(),
};
self.name[7 - i] = c;
}
Ok(())
} else {
Err(Error::Malformed(format!(
"Invalid section name offset: {}",
idx
)))
}
}
pub fn name(&self) -> error::Result<&str> {
match self.real_name.as_ref() {
Some(s) => Ok(s),
None => Ok(self.name.pread(0)?),
}
}
pub fn relocations<'a>(&self, bytes: &'a [u8]) -> error::Result<relocation::Relocations<'a>> {
let offset = self.pointer_to_relocations as usize;
let number = self.number_of_relocations as usize;
relocation::Relocations::parse(bytes, offset, number)
}
}
impl ctx::SizeWith<scroll::Endian> for SectionTable {
fn size_with(_ctx: &scroll::Endian) -> usize {
SIZEOF_SECTION_TABLE
}
}
impl ctx::TryIntoCtx<scroll::Endian> for SectionTable {
type Error = error::Error;
fn try_into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) -> Result<usize, Self::Error> {
let offset = &mut 0;
bytes.gwrite(&self.name[..], offset)?;
bytes.gwrite_with(self.virtual_size, offset, ctx)?;
bytes.gwrite_with(self.virtual_address, offset, ctx)?;
bytes.gwrite_with(self.size_of_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_relocations, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.number_of_relocations, offset, ctx)?;
bytes.gwrite_with(self.number_of_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.characteristics, offset, ctx)?;
Ok(SIZEOF_SECTION_TABLE)
}
}
impl ctx::IntoCtx<scroll::Endian> for SectionTable {
fn into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) {
bytes.pwrite_with(self, 0, ctx).unwrap();
}
}
/// The section should not be padded to the next boundary. This flag is obsolete and is replaced
/// by `IMAGE_SCN_ALIGN_1BYTES`. This is valid only for object files.
pub const IMAGE_SCN_TYPE_NO_PAD: u32 = 0x0000_0008;
/// The section contains executable code.
pub const IMAGE_SCN_CNT_CODE: u32 = 0x0000_0020;
/// The section contains initialized data.
pub const IMAGE_SCN_CNT_INITIALIZED_DATA: u32 = 0x0000_0040;
/// The section contains uninitialized data.
pub const IMAGE_SCN_CNT_UNINITIALIZED_DATA: u32 = 0x0000_0080;
pub const IMAGE_SCN_LNK_OTHER: u32 = 0x0000_0100;
/// The section contains comments or other information. The .drectve section has this type.
/// This is valid for object files only.
pub const IMAGE_SCN_LNK_INFO: u32 = 0x0000_0200;
/// The section will not become part of the image. This is valid only for object files.
pub const IMAGE_SCN_LNK_REMOVE: u32 = 0x0000_0800;
/// The section contains COMDAT data. This is valid only for object files.
pub const IMAGE_SCN_LNK_COMDAT: u32 = 0x0000_1000;
/// The section contains data referenced through the global pointer (GP).
pub const IMAGE_SCN_GPREL: u32 = 0x0000_8000;
pub const IMAGE_SCN_MEM_PURGEABLE: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_16BIT: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_LOCKED: u32 = 0x0004_0000;
pub const IMAGE_SCN_MEM_PRELOAD: u32 = 0x0008_0000;
pub const IMAGE_SCN_ALIGN_1BYTES: u32 = 0x0010_0000;
pub const IMAGE_SCN_ALIGN_2BYTES: u32 = 0x0020_0000;
pub const IMAGE_SCN_ALIGN_4BYTES: u32 = 0x0030_0000;
pub const IMAGE_SCN_ALIGN_8BYTES: u32 = 0x0040_0000;
pub const IMAGE_SCN_ALIGN_16BYTES: u32 = 0x0050_0000;
pub const IMAGE_SCN_ALIGN_32BYTES: u32 = 0x0060_0000;
pub const IMAGE_SCN_ALIGN_64BYTES: u32 = 0x0070_0000;
pub const IMAGE_SCN_ALIGN_128BYTES: u32 = 0x0080_0000;
pub const IMAGE_SCN_ALIGN_256BYTES: u32 = 0x0090_0000;
pub const IMAGE_SCN_ALIGN_512BYTES: u32 = 0x00A0_0000;
pub const IMAGE_SCN_ALIGN_1024BYTES: u32 = 0x00B0_0000;
pub const IMAGE_SCN_ALIGN_2048BYTES: u32 = 0x00C0_0000;
pub const IMAGE_SCN_ALIGN_4096BYTES: u32 = 0x00D0_0000;
pub const IMAGE_SCN_ALIGN_8192BYTES: u32 = 0x00E0_0000;
pub const IMAGE_SCN_ALIGN_MASK: u32 = 0x00F0_0000;
/// The section contains extended relocations.
pub const IMAGE_SCN_LNK_NRELOC_OVFL: u32 = 0x0100_0000;
/// The section can be discarded as needed.
pub const IMAGE_SCN_MEM_DISCARDABLE: u32 = 0x0200_0000;
/// The section cannot be cached.
pub const IMAGE_SCN_MEM_NOT_CACHED: u32 = 0x0400_0000;
/// The section is not pageable.
pub const IMAGE_SCN_MEM_NOT_PAGED: u32 = 0x0800_0000;
/// The section can be shared in memory.
pub const IMAGE_SCN_MEM_SHARED: u32 = 0x1000_0000;
/// The section can be executed as code.
pub const IMAGE_SCN_MEM_EXECUTE: u32 = 0x2000_0000;
/// The section can be read.
pub const IMAGE_SCN_MEM_READ: u32 = 0x4000_0000;
/// The section can be written to.
pub const IMAGE_SCN_MEM_WRITE: u32 = 0x8000_0000;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn set_name_offset() |
}
| {
let mut section = SectionTable::default();
for &(offset, name) in [
(0usize, b"/0\0\0\0\0\0\0"),
(1, b"/1\0\0\0\0\0\0"),
(9_999_999, b"/9999999"),
(10_000_000, b"//AAmJaA"),
#[cfg(target_pointer_width = "64")]
(0xfff_fff_fff, b"////////"),
]
.iter()
{
section.set_name_offset(offset).unwrap();
assert_eq!(&section.name, name);
assert_eq!(section.name_offset().unwrap(), Some(offset));
}
#[cfg(target_pointer_width = "64")]
assert!(section.set_name_offset(0x1_000_000_000).is_err());
} | identifier_body |
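// A self-contained sketch (separate from the goblin code above) of the two COFF
// long-section-name encodings that code handles: "/<decimal>" and "//<base64>"
// string-table offsets. The expected values mirror the unit test in the listing.
fn base64_char_value(c: u8) -> Option<usize> {
    match c {
        b'A'..=b'Z' => Some((c - b'A') as usize),      // 0..=25
        b'a'..=b'z' => Some((c - b'a') as usize + 26), // 26..=51
        b'0'..=b'9' => Some((c - b'0') as usize + 52), // 52..=61
        b'+' => Some(62),
        b'/' => Some(63),
        _ => None,
    }
}
fn decode_name_offset(name: &[u8; 8]) -> Option<usize> {
    if name[0] != b'/' {
        return None; // short name stored inline: no string-table offset
    }
    if name[1] == b'/' {
        // "//" prefix: six base-64 digits, most significant first
        name[2..]
            .iter()
            .try_fold(0usize, |acc, &c| Some(acc * 64 + base64_char_value(c)?))
    } else {
        // "/" prefix: ASCII decimal digits, NUL padded
        let digits: Vec<u8> = name[1..].iter().copied().take_while(|&c| c != 0).collect();
        std::str::from_utf8(&digits).ok()?.parse().ok()
    }
}
fn main() {
    assert_eq!(decode_name_offset(b"/9999999"), Some(9_999_999));
    assert_eq!(decode_name_offset(b"//AAmJaA"), Some(10_000_000));
    assert_eq!(decode_name_offset(b".text\0\0\0"), None);
}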
section_table.rs | use crate::error::{self, Error};
use crate::pe::relocation;
use alloc::string::{String, ToString};
use scroll::{ctx, Pread, Pwrite};
#[repr(C)]
#[derive(Debug, PartialEq, Clone, Default)]
pub struct SectionTable {
pub name: [u8; 8],
pub real_name: Option<String>,
pub virtual_size: u32,
pub virtual_address: u32,
pub size_of_raw_data: u32,
pub pointer_to_raw_data: u32,
pub pointer_to_relocations: u32,
pub pointer_to_linenumbers: u32,
pub number_of_relocations: u16,
pub number_of_linenumbers: u16,
pub characteristics: u32,
}
pub const SIZEOF_SECTION_TABLE: usize = 8 * 5;
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L70
// Decodes a string table entry in base 64 (//AAAAAA). Expects string without
// prefixed slashes.
fn base64_decode_string_entry(s: &str) -> Result<usize, ()> {
assert!(s.len() <= 6, "String too long, possible overflow.");
let mut val = 0;
for c in s.bytes() {
let v = if b'A' <= c && c <= b'Z' {
// 00..=25
c - b'A'
} else if b'a' <= c && c <= b'z' {
// 26..=51
c - b'a' + 26
} else if b'0' <= c && c <= b'9' {
// 52..=61
c - b'0' + 52
} else if c == b'+' {
// 62
62
} else if c == b'/' {
// 63
63
} else {
return Err(());
};
val = val * 64 + v as usize;
}
Ok(val)
}
impl SectionTable {
pub fn parse(
bytes: &[u8],
offset: &mut usize,
string_table_offset: usize,
) -> error::Result<Self> {
let mut table = SectionTable::default();
let mut name = [0u8; 8];
name.copy_from_slice(bytes.gread_with(offset, 8)?);
table.name = name;
table.virtual_size = bytes.gread_with(offset, scroll::LE)?;
table.virtual_address = bytes.gread_with(offset, scroll::LE)?;
table.size_of_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_relocations = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.number_of_relocations = bytes.gread_with(offset, scroll::LE)?;
table.number_of_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.characteristics = bytes.gread_with(offset, scroll::LE)?;
if let Some(idx) = table.name_offset()? {
table.real_name = Some(bytes.pread::<&str>(string_table_offset + idx)?.to_string());
}
Ok(table)
}
pub fn name_offset(&self) -> error::Result<Option<usize>> {
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L1054
if self.name[0] == b'/' {
let idx: usize = if self.name[1] == b'/' {
let b64idx = self.name.pread::<&str>(2)?;
base64_decode_string_entry(b64idx).map_err(|_| {
Error::Malformed(format!(
"Invalid indirect section name //{}: base64 decoding failed",
b64idx
))
})?
} else {
let name = self.name.pread::<&str>(1)?;
name.parse().map_err(|err| {
Error::Malformed(format!("Invalid indirect section name /{}: {}", name, err))
})?
};
Ok(Some(idx))
} else {
Ok(None)
}
}
#[allow(clippy::useless_let_if_seq)]
pub fn set_name_offset(&mut self, mut idx: usize) -> error::Result<()> {
if idx <= 9_999_999 | Ok(())
}
else if idx as u64 <= 0xfff_fff_fff {
// 64^6 - 1
self.name[0] = b'/';
self.name[1] = b'/';
for i in 0..6 {
let rem = (idx % 64) as u8;
idx /= 64;
let c = match rem {
0..=25 => b'A' + rem,
26..=51 => b'a' + rem - 26,
52..=61 => b'0' + rem - 52,
62 => b'+',
63 => b'/',
_ => unreachable!(),
};
self.name[7 - i] = c;
}
Ok(())
} else {
Err(Error::Malformed(format!(
"Invalid section name offset: {}",
idx
)))
}
}
pub fn name(&self) -> error::Result<&str> {
match self.real_name.as_ref() {
Some(s) => Ok(s),
None => Ok(self.name.pread(0)?),
}
}
pub fn relocations<'a>(&self, bytes: &'a [u8]) -> error::Result<relocation::Relocations<'a>> {
let offset = self.pointer_to_relocations as usize;
let number = self.number_of_relocations as usize;
relocation::Relocations::parse(bytes, offset, number)
}
}
impl ctx::SizeWith<scroll::Endian> for SectionTable {
fn size_with(_ctx: &scroll::Endian) -> usize {
SIZEOF_SECTION_TABLE
}
}
impl ctx::TryIntoCtx<scroll::Endian> for SectionTable {
type Error = error::Error;
fn try_into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) -> Result<usize, Self::Error> {
let offset = &mut 0;
bytes.gwrite(&self.name[..], offset)?;
bytes.gwrite_with(self.virtual_size, offset, ctx)?;
bytes.gwrite_with(self.virtual_address, offset, ctx)?;
bytes.gwrite_with(self.size_of_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_relocations, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.number_of_relocations, offset, ctx)?;
bytes.gwrite_with(self.number_of_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.characteristics, offset, ctx)?;
Ok(SIZEOF_SECTION_TABLE)
}
}
impl ctx::IntoCtx<scroll::Endian> for SectionTable {
fn into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) {
bytes.pwrite_with(self, 0, ctx).unwrap();
}
}
/// The section should not be padded to the next boundary. This flag is obsolete and is replaced
/// by `IMAGE_SCN_ALIGN_1BYTES`. This is valid only for object files.
pub const IMAGE_SCN_TYPE_NO_PAD: u32 = 0x0000_0008;
/// The section contains executable code.
pub const IMAGE_SCN_CNT_CODE: u32 = 0x0000_0020;
/// The section contains initialized data.
pub const IMAGE_SCN_CNT_INITIALIZED_DATA: u32 = 0x0000_0040;
/// The section contains uninitialized data.
pub const IMAGE_SCN_CNT_UNINITIALIZED_DATA: u32 = 0x0000_0080;
pub const IMAGE_SCN_LNK_OTHER: u32 = 0x0000_0100;
/// The section contains comments or other information. The .drectve section has this type.
/// This is valid for object files only.
pub const IMAGE_SCN_LNK_INFO: u32 = 0x0000_0200;
/// The section will not become part of the image. This is valid only for object files.
pub const IMAGE_SCN_LNK_REMOVE: u32 = 0x0000_0800;
/// The section contains COMDAT data. This is valid only for object files.
pub const IMAGE_SCN_LNK_COMDAT: u32 = 0x0000_1000;
/// The section contains data referenced through the global pointer (GP).
pub const IMAGE_SCN_GPREL: u32 = 0x0000_8000;
pub const IMAGE_SCN_MEM_PURGEABLE: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_16BIT: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_LOCKED: u32 = 0x0004_0000;
pub const IMAGE_SCN_MEM_PRELOAD: u32 = 0x0008_0000;
pub const IMAGE_SCN_ALIGN_1BYTES: u32 = 0x0010_0000;
pub const IMAGE_SCN_ALIGN_2BYTES: u32 = 0x0020_0000;
pub const IMAGE_SCN_ALIGN_4BYTES: u32 = 0x0030_0000;
pub const IMAGE_SCN_ALIGN_8BYTES: u32 = 0x0040_0000;
pub const IMAGE_SCN_ALIGN_16BYTES: u32 = 0x0050_0000;
pub const IMAGE_SCN_ALIGN_32BYTES: u32 = 0x0060_0000;
pub const IMAGE_SCN_ALIGN_64BYTES: u32 = 0x0070_0000;
pub const IMAGE_SCN_ALIGN_128BYTES: u32 = 0x0080_0000;
pub const IMAGE_SCN_ALIGN_256BYTES: u32 = 0x0090_0000;
pub const IMAGE_SCN_ALIGN_512BYTES: u32 = 0x00A0_0000;
pub const IMAGE_SCN_ALIGN_1024BYTES: u32 = 0x00B0_0000;
pub const IMAGE_SCN_ALIGN_2048BYTES: u32 = 0x00C0_0000;
pub const IMAGE_SCN_ALIGN_4096BYTES: u32 = 0x00D0_0000;
pub const IMAGE_SCN_ALIGN_8192BYTES: u32 = 0x00E0_0000;
pub const IMAGE_SCN_ALIGN_MASK: u32 = 0x00F0_0000;
/// The section contains extended relocations.
pub const IMAGE_SCN_LNK_NRELOC_OVFL: u32 = 0x0100_0000;
/// The section can be discarded as needed.
pub const IMAGE_SCN_MEM_DISCARDABLE: u32 = 0x0200_0000;
/// The section cannot be cached.
pub const IMAGE_SCN_MEM_NOT_CACHED: u32 = 0x0400_0000;
/// The section is not pageable.
pub const IMAGE_SCN_MEM_NOT_PAGED: u32 = 0x0800_0000;
/// The section can be shared in memory.
pub const IMAGE_SCN_MEM_SHARED: u32 = 0x1000_0000;
/// The section can be executed as code.
pub const IMAGE_SCN_MEM_EXECUTE: u32 = 0x2000_0000;
/// The section can be read.
pub const IMAGE_SCN_MEM_READ: u32 = 0x4000_0000;
/// The section can be written to.
pub const IMAGE_SCN_MEM_WRITE: u32 = 0x8000_0000;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn set_name_offset() {
let mut section = SectionTable::default();
for &(offset, name) in [
(0usize, b"/0\0\0\0\0\0\0"),
(1, b"/1\0\0\0\0\0\0"),
(9_999_999, b"/9999999"),
(10_000_000, b"//AAmJaA"),
#[cfg(target_pointer_width = "64")]
(0xfff_fff_fff, b"////////"),
]
.iter()
{
section.set_name_offset(offset).unwrap();
assert_eq!(&section.name, name);
assert_eq!(section.name_offset().unwrap(), Some(offset));
}
#[cfg(target_pointer_width = "64")]
assert!(section.set_name_offset(0x1_000_000_000).is_err());
}
}
| {
// 10^7 - 1
// write!(&mut self.name[1..], "{}", idx) without using io::Write.
// We write into a temporary since we calculate digits starting at the right.
let mut name = [0; 7];
let mut len = 0;
if idx == 0 {
name[6] = b'0';
len = 1;
} else {
while idx != 0 {
let rem = (idx % 10) as u8;
idx /= 10;
name[6 - len] = b'0' + rem;
len += 1;
}
}
self.name = [0; 8];
self.name[0] = b'/';
self.name[1..][..len].copy_from_slice(&name[7 - len..]); | conditional_block |
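// A minimal sketch (independent of the crate code above) of what that conditional
// block does: formatting a string-table offset as "/NNNNNNN" into a fixed 8-byte
// COFF name without std::io::Write. The expected bytes match the listing's test.
fn encode_decimal_name(mut idx: usize) -> Option<[u8; 8]> {
    if idx > 9_999_999 {
        return None; // does not fit in seven ASCII digits
    }
    // Build the digits right-aligned in a temporary, exactly like the listing.
    let mut digits = [0u8; 7];
    let mut len = 0;
    if idx == 0 {
        digits[6] = b'0';
        len = 1;
    } else {
        while idx != 0 {
            digits[6 - len] = b'0' + (idx % 10) as u8;
            idx /= 10;
            len += 1;
        }
    }
    let mut name = [0u8; 8];
    name[0] = b'/';
    name[1..=len].copy_from_slice(&digits[7 - len..]);
    Some(name)
}
fn main() {
    assert_eq!(&encode_decimal_name(0).unwrap(), b"/0\0\0\0\0\0\0");
    assert_eq!(&encode_decimal_name(9_999_999).unwrap(), b"/9999999");
    assert_eq!(encode_decimal_name(10_000_000), None);
}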
section_table.rs | use crate::error::{self, Error};
use crate::pe::relocation;
use alloc::string::{String, ToString};
use scroll::{ctx, Pread, Pwrite};
#[repr(C)]
#[derive(Debug, PartialEq, Clone, Default)]
pub struct SectionTable {
pub name: [u8; 8],
pub real_name: Option<String>,
pub virtual_size: u32,
pub virtual_address: u32,
pub size_of_raw_data: u32,
pub pointer_to_raw_data: u32,
pub pointer_to_relocations: u32,
pub pointer_to_linenumbers: u32,
pub number_of_relocations: u16,
pub number_of_linenumbers: u16,
pub characteristics: u32,
}
pub const SIZEOF_SECTION_TABLE: usize = 8 * 5;
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L70
// Decodes a string table entry in base 64 (//AAAAAA). Expects string without
// prefixed slashes.
fn base64_decode_string_entry(s: &str) -> Result<usize, ()> {
assert!(s.len() <= 6, "String too long, possible overflow.");
let mut val = 0;
for c in s.bytes() {
let v = if b'A' <= c && c <= b'Z' {
// 00..=25
c - b'A'
} else if b'a' <= c && c <= b'z' {
// 26..=51
c - b'a' + 26
} else if b'0' <= c && c <= b'9' {
// 52..=61
c - b'0' + 52
} else if c == b'+' {
// 62 | } else if c == b'/' {
// 63
63
} else {
return Err(());
};
val = val * 64 + v as usize;
}
Ok(val)
}
impl SectionTable {
pub fn parse(
bytes: &[u8],
offset: &mut usize,
string_table_offset: usize,
) -> error::Result<Self> {
let mut table = SectionTable::default();
let mut name = [0u8; 8];
name.copy_from_slice(bytes.gread_with(offset, 8)?);
table.name = name;
table.virtual_size = bytes.gread_with(offset, scroll::LE)?;
table.virtual_address = bytes.gread_with(offset, scroll::LE)?;
table.size_of_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_relocations = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.number_of_relocations = bytes.gread_with(offset, scroll::LE)?;
table.number_of_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.characteristics = bytes.gread_with(offset, scroll::LE)?;
if let Some(idx) = table.name_offset()? {
table.real_name = Some(bytes.pread::<&str>(string_table_offset + idx)?.to_string());
}
Ok(table)
}
pub fn name_offset(&self) -> error::Result<Option<usize>> {
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L1054
if self.name[0] == b'/' {
let idx: usize = if self.name[1] == b'/' {
let b64idx = self.name.pread::<&str>(2)?;
base64_decode_string_entry(b64idx).map_err(|_| {
Error::Malformed(format!(
"Invalid indirect section name //{}: base64 decoding failed",
b64idx
))
})?
} else {
let name = self.name.pread::<&str>(1)?;
name.parse().map_err(|err| {
Error::Malformed(format!("Invalid indirect section name /{}: {}", name, err))
})?
};
Ok(Some(idx))
} else {
Ok(None)
}
}
#[allow(clippy::useless_let_if_seq)]
pub fn set_name_offset(&mut self, mut idx: usize) -> error::Result<()> {
if idx <= 9_999_999 {
// 10^7 - 1
// write!(&mut self.name[1..], "{}", idx) without using io::Write.
// We write into a temporary since we calculate digits starting at the right.
let mut name = [0; 7];
let mut len = 0;
if idx == 0 {
name[6] = b'0';
len = 1;
} else {
while idx != 0 {
let rem = (idx % 10) as u8;
idx /= 10;
name[6 - len] = b'0' + rem;
len += 1;
}
}
self.name = [0; 8];
self.name[0] = b'/';
self.name[1..][..len].copy_from_slice(&name[7 - len..]);
Ok(())
} else if idx as u64 <= 0xfff_fff_fff {
// 64^6 - 1
self.name[0] = b'/';
self.name[1] = b'/';
for i in 0..6 {
let rem = (idx % 64) as u8;
idx /= 64;
let c = match rem {
0..=25 => b'A' + rem,
26..=51 => b'a' + rem - 26,
52..=61 => b'0' + rem - 52,
62 => b'+',
63 => b'/',
_ => unreachable!(),
};
self.name[7 - i] = c;
}
Ok(())
} else {
Err(Error::Malformed(format!(
"Invalid section name offset: {}",
idx
)))
}
}
pub fn name(&self) -> error::Result<&str> {
match self.real_name.as_ref() {
Some(s) => Ok(s),
None => Ok(self.name.pread(0)?),
}
}
pub fn relocations<'a>(&self, bytes: &'a [u8]) -> error::Result<relocation::Relocations<'a>> {
let offset = self.pointer_to_relocations as usize;
let number = self.number_of_relocations as usize;
relocation::Relocations::parse(bytes, offset, number)
}
}
impl ctx::SizeWith<scroll::Endian> for SectionTable {
fn size_with(_ctx: &scroll::Endian) -> usize {
SIZEOF_SECTION_TABLE
}
}
impl ctx::TryIntoCtx<scroll::Endian> for SectionTable {
type Error = error::Error;
fn try_into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) -> Result<usize, Self::Error> {
let offset = &mut 0;
bytes.gwrite(&self.name[..], offset)?;
bytes.gwrite_with(self.virtual_size, offset, ctx)?;
bytes.gwrite_with(self.virtual_address, offset, ctx)?;
bytes.gwrite_with(self.size_of_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_relocations, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.number_of_relocations, offset, ctx)?;
bytes.gwrite_with(self.number_of_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.characteristics, offset, ctx)?;
Ok(SIZEOF_SECTION_TABLE)
}
}
impl ctx::IntoCtx<scroll::Endian> for SectionTable {
fn into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) {
bytes.pwrite_with(self, 0, ctx).unwrap();
}
}
/// The section should not be padded to the next boundary. This flag is obsolete and is replaced
/// by `IMAGE_SCN_ALIGN_1BYTES`. This is valid only for object files.
pub const IMAGE_SCN_TYPE_NO_PAD: u32 = 0x0000_0008;
/// The section contains executable code.
pub const IMAGE_SCN_CNT_CODE: u32 = 0x0000_0020;
/// The section contains initialized data.
pub const IMAGE_SCN_CNT_INITIALIZED_DATA: u32 = 0x0000_0040;
/// The section contains uninitialized data.
pub const IMAGE_SCN_CNT_UNINITIALIZED_DATA: u32 = 0x0000_0080;
pub const IMAGE_SCN_LNK_OTHER: u32 = 0x0000_0100;
/// The section contains comments or other information. The .drectve section has this type.
/// This is valid for object files only.
pub const IMAGE_SCN_LNK_INFO: u32 = 0x0000_0200;
/// The section will not become part of the image. This is valid only for object files.
pub const IMAGE_SCN_LNK_REMOVE: u32 = 0x0000_0800;
/// The section contains COMDAT data. This is valid only for object files.
pub const IMAGE_SCN_LNK_COMDAT: u32 = 0x0000_1000;
/// The section contains data referenced through the global pointer (GP).
pub const IMAGE_SCN_GPREL: u32 = 0x0000_8000;
pub const IMAGE_SCN_MEM_PURGEABLE: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_16BIT: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_LOCKED: u32 = 0x0004_0000;
pub const IMAGE_SCN_MEM_PRELOAD: u32 = 0x0008_0000;
pub const IMAGE_SCN_ALIGN_1BYTES: u32 = 0x0010_0000;
pub const IMAGE_SCN_ALIGN_2BYTES: u32 = 0x0020_0000;
pub const IMAGE_SCN_ALIGN_4BYTES: u32 = 0x0030_0000;
pub const IMAGE_SCN_ALIGN_8BYTES: u32 = 0x0040_0000;
pub const IMAGE_SCN_ALIGN_16BYTES: u32 = 0x0050_0000;
pub const IMAGE_SCN_ALIGN_32BYTES: u32 = 0x0060_0000;
pub const IMAGE_SCN_ALIGN_64BYTES: u32 = 0x0070_0000;
pub const IMAGE_SCN_ALIGN_128BYTES: u32 = 0x0080_0000;
pub const IMAGE_SCN_ALIGN_256BYTES: u32 = 0x0090_0000;
pub const IMAGE_SCN_ALIGN_512BYTES: u32 = 0x00A0_0000;
pub const IMAGE_SCN_ALIGN_1024BYTES: u32 = 0x00B0_0000;
pub const IMAGE_SCN_ALIGN_2048BYTES: u32 = 0x00C0_0000;
pub const IMAGE_SCN_ALIGN_4096BYTES: u32 = 0x00D0_0000;
pub const IMAGE_SCN_ALIGN_8192BYTES: u32 = 0x00E0_0000;
pub const IMAGE_SCN_ALIGN_MASK: u32 = 0x00F0_0000;
/// The section contains extended relocations.
pub const IMAGE_SCN_LNK_NRELOC_OVFL: u32 = 0x0100_0000;
/// The section can be discarded as needed.
pub const IMAGE_SCN_MEM_DISCARDABLE: u32 = 0x0200_0000;
/// The section cannot be cached.
pub const IMAGE_SCN_MEM_NOT_CACHED: u32 = 0x0400_0000;
/// The section is not pageable.
pub const IMAGE_SCN_MEM_NOT_PAGED: u32 = 0x0800_0000;
/// The section can be shared in memory.
pub const IMAGE_SCN_MEM_SHARED: u32 = 0x1000_0000;
/// The section can be executed as code.
pub const IMAGE_SCN_MEM_EXECUTE: u32 = 0x2000_0000;
/// The section can be read.
pub const IMAGE_SCN_MEM_READ: u32 = 0x4000_0000;
/// The section can be written to.
pub const IMAGE_SCN_MEM_WRITE: u32 = 0x8000_0000;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn set_name_offset() {
let mut section = SectionTable::default();
for &(offset, name) in [
(0usize, b"/0\0\0\0\0\0\0"),
(1, b"/1\0\0\0\0\0\0"),
(9_999_999, b"/9999999"),
(10_000_000, b"//AAmJaA"),
#[cfg(target_pointer_width = "64")]
(0xfff_fff_fff, b"////////"),
]
.iter()
{
section.set_name_offset(offset).unwrap();
assert_eq!(&section.name, name);
assert_eq!(section.name_offset().unwrap(), Some(offset));
}
#[cfg(target_pointer_width = "64")]
assert!(section.set_name_offset(0x1_000_000_000).is_err());
}
} | 62 | random_line_split |
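// A small usage sketch of the IMAGE_SCN_* bit flags documented in that listing,
// with the needed constants repeated locally so it runs standalone; the sections
// named here are hypothetical, not read from a real PE file.
const IMAGE_SCN_CNT_CODE: u32 = 0x0000_0020;
const IMAGE_SCN_MEM_EXECUTE: u32 = 0x2000_0000;
const IMAGE_SCN_MEM_READ: u32 = 0x4000_0000;
const IMAGE_SCN_MEM_WRITE: u32 = 0x8000_0000;
fn is_executable(characteristics: u32) -> bool {
    characteristics & IMAGE_SCN_MEM_EXECUTE != 0
}
fn main() {
    // A typical .text section: readable, executable code.
    let text = IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_EXECUTE;
    // A typical .data section: readable and writable, no execute bit.
    let data = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE;
    assert!(is_executable(text));
    assert!(!is_executable(data));
}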
section_table.rs | use crate::error::{self, Error};
use crate::pe::relocation;
use alloc::string::{String, ToString};
use scroll::{ctx, Pread, Pwrite};
#[repr(C)]
#[derive(Debug, PartialEq, Clone, Default)]
pub struct SectionTable {
pub name: [u8; 8],
pub real_name: Option<String>,
pub virtual_size: u32,
pub virtual_address: u32,
pub size_of_raw_data: u32,
pub pointer_to_raw_data: u32,
pub pointer_to_relocations: u32,
pub pointer_to_linenumbers: u32,
pub number_of_relocations: u16,
pub number_of_linenumbers: u16,
pub characteristics: u32,
}
pub const SIZEOF_SECTION_TABLE: usize = 8 * 5;
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L70
// Decodes a string table entry in base 64 (//AAAAAA). Expects string without
// prefixed slashes.
fn base64_decode_string_entry(s: &str) -> Result<usize, ()> {
assert!(s.len() <= 6, "String too long, possible overflow.");
let mut val = 0;
for c in s.bytes() {
let v = if b'A' <= c && c <= b'Z' {
// 00..=25
c - b'A'
} else if b'a' <= c && c <= b'z' {
// 26..=51
c - b'a' + 26
} else if b'0' <= c && c <= b'9' {
// 52..=61
c - b'0' + 52
} else if c == b'+' {
// 62
62
} else if c == b'/' {
// 63
63
} else {
return Err(());
};
val = val * 64 + v as usize;
}
Ok(val)
}
impl SectionTable {
pub fn parse(
bytes: &[u8],
offset: &mut usize,
string_table_offset: usize,
) -> error::Result<Self> {
let mut table = SectionTable::default();
let mut name = [0u8; 8];
name.copy_from_slice(bytes.gread_with(offset, 8)?);
table.name = name;
table.virtual_size = bytes.gread_with(offset, scroll::LE)?;
table.virtual_address = bytes.gread_with(offset, scroll::LE)?;
table.size_of_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_raw_data = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_relocations = bytes.gread_with(offset, scroll::LE)?;
table.pointer_to_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.number_of_relocations = bytes.gread_with(offset, scroll::LE)?;
table.number_of_linenumbers = bytes.gread_with(offset, scroll::LE)?;
table.characteristics = bytes.gread_with(offset, scroll::LE)?;
if let Some(idx) = table.name_offset()? {
table.real_name = Some(bytes.pread::<&str>(string_table_offset + idx)?.to_string());
}
Ok(table)
}
pub fn name_offset(&self) -> error::Result<Option<usize>> {
// Based on https://github.com/llvm-mirror/llvm/blob/af7b1832a03ab6486c42a40d21695b2c03b2d8a3/lib/Object/COFFObjectFile.cpp#L1054
if self.name[0] == b'/' {
let idx: usize = if self.name[1] == b'/' {
let b64idx = self.name.pread::<&str>(2)?;
base64_decode_string_entry(b64idx).map_err(|_| {
Error::Malformed(format!(
"Invalid indirect section name //{}: base64 decoding failed",
b64idx
))
})?
} else {
let name = self.name.pread::<&str>(1)?;
name.parse().map_err(|err| {
Error::Malformed(format!("Invalid indirect section name /{}: {}", name, err))
})?
};
Ok(Some(idx))
} else {
Ok(None)
}
}
#[allow(clippy::useless_let_if_seq)]
pub fn set_name_offset(&mut self, mut idx: usize) -> error::Result<()> {
if idx <= 9_999_999 {
// 10^7 - 1
// write!(&mut self.name[1..], "{}", idx) without using io::Write.
// We write into a temporary since we calculate digits starting at the right.
let mut name = [0; 7];
let mut len = 0;
if idx == 0 {
name[6] = b'0';
len = 1;
} else {
while idx != 0 {
let rem = (idx % 10) as u8;
idx /= 10;
name[6 - len] = b'0' + rem;
len += 1;
}
}
self.name = [0; 8];
self.name[0] = b'/';
self.name[1..][..len].copy_from_slice(&name[7 - len..]);
Ok(())
} else if idx as u64 <= 0xfff_fff_fff {
// 64^6 - 1
self.name[0] = b'/';
self.name[1] = b'/';
for i in 0..6 {
let rem = (idx % 64) as u8;
idx /= 64;
let c = match rem {
0..=25 => b'A' + rem,
26..=51 => b'a' + rem - 26,
52..=61 => b'0' + rem - 52,
62 => b'+',
63 => b'/',
_ => unreachable!(),
};
self.name[7 - i] = c;
}
Ok(())
} else {
Err(Error::Malformed(format!(
"Invalid section name offset: {}",
idx
)))
}
}
pub fn name(&self) -> error::Result<&str> {
match self.real_name.as_ref() {
Some(s) => Ok(s),
None => Ok(self.name.pread(0)?),
}
}
pub fn relocations<'a>(&self, bytes: &'a [u8]) -> error::Result<relocation::Relocations<'a>> {
let offset = self.pointer_to_relocations as usize;
let number = self.number_of_relocations as usize;
relocation::Relocations::parse(bytes, offset, number)
}
}
impl ctx::SizeWith<scroll::Endian> for SectionTable {
fn | (_ctx: &scroll::Endian) -> usize {
SIZEOF_SECTION_TABLE
}
}
impl ctx::TryIntoCtx<scroll::Endian> for SectionTable {
type Error = error::Error;
fn try_into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) -> Result<usize, Self::Error> {
let offset = &mut 0;
bytes.gwrite(&self.name[..], offset)?;
bytes.gwrite_with(self.virtual_size, offset, ctx)?;
bytes.gwrite_with(self.virtual_address, offset, ctx)?;
bytes.gwrite_with(self.size_of_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_raw_data, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_relocations, offset, ctx)?;
bytes.gwrite_with(self.pointer_to_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.number_of_relocations, offset, ctx)?;
bytes.gwrite_with(self.number_of_linenumbers, offset, ctx)?;
bytes.gwrite_with(self.characteristics, offset, ctx)?;
Ok(SIZEOF_SECTION_TABLE)
}
}
impl ctx::IntoCtx<scroll::Endian> for SectionTable {
fn into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) {
bytes.pwrite_with(self, 0, ctx).unwrap();
}
}
/// The section should not be padded to the next boundary. This flag is obsolete and is replaced
/// by `IMAGE_SCN_ALIGN_1BYTES`. This is valid only for object files.
pub const IMAGE_SCN_TYPE_NO_PAD: u32 = 0x0000_0008;
/// The section contains executable code.
pub const IMAGE_SCN_CNT_CODE: u32 = 0x0000_0020;
/// The section contains initialized data.
pub const IMAGE_SCN_CNT_INITIALIZED_DATA: u32 = 0x0000_0040;
/// The section contains uninitialized data.
pub const IMAGE_SCN_CNT_UNINITIALIZED_DATA: u32 = 0x0000_0080;
pub const IMAGE_SCN_LNK_OTHER: u32 = 0x0000_0100;
/// The section contains comments or other information. The .drectve section has this type.
/// This is valid for object files only.
pub const IMAGE_SCN_LNK_INFO: u32 = 0x0000_0200;
/// The section will not become part of the image. This is valid only for object files.
pub const IMAGE_SCN_LNK_REMOVE: u32 = 0x0000_0800;
/// The section contains COMDAT data. This is valid only for object files.
pub const IMAGE_SCN_LNK_COMDAT: u32 = 0x0000_1000;
/// The section contains data referenced through the global pointer (GP).
pub const IMAGE_SCN_GPREL: u32 = 0x0000_8000;
pub const IMAGE_SCN_MEM_PURGEABLE: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_16BIT: u32 = 0x0002_0000;
pub const IMAGE_SCN_MEM_LOCKED: u32 = 0x0004_0000;
pub const IMAGE_SCN_MEM_PRELOAD: u32 = 0x0008_0000;
pub const IMAGE_SCN_ALIGN_1BYTES: u32 = 0x0010_0000;
pub const IMAGE_SCN_ALIGN_2BYTES: u32 = 0x0020_0000;
pub const IMAGE_SCN_ALIGN_4BYTES: u32 = 0x0030_0000;
pub const IMAGE_SCN_ALIGN_8BYTES: u32 = 0x0040_0000;
pub const IMAGE_SCN_ALIGN_16BYTES: u32 = 0x0050_0000;
pub const IMAGE_SCN_ALIGN_32BYTES: u32 = 0x0060_0000;
pub const IMAGE_SCN_ALIGN_64BYTES: u32 = 0x0070_0000;
pub const IMAGE_SCN_ALIGN_128BYTES: u32 = 0x0080_0000;
pub const IMAGE_SCN_ALIGN_256BYTES: u32 = 0x0090_0000;
pub const IMAGE_SCN_ALIGN_512BYTES: u32 = 0x00A0_0000;
pub const IMAGE_SCN_ALIGN_1024BYTES: u32 = 0x00B0_0000;
pub const IMAGE_SCN_ALIGN_2048BYTES: u32 = 0x00C0_0000;
pub const IMAGE_SCN_ALIGN_4096BYTES: u32 = 0x00D0_0000;
pub const IMAGE_SCN_ALIGN_8192BYTES: u32 = 0x00E0_0000;
pub const IMAGE_SCN_ALIGN_MASK: u32 = 0x00F0_0000;
/// The section contains extended relocations.
pub const IMAGE_SCN_LNK_NRELOC_OVFL: u32 = 0x0100_0000;
/// The section can be discarded as needed.
pub const IMAGE_SCN_MEM_DISCARDABLE: u32 = 0x0200_0000;
/// The section cannot be cached.
pub const IMAGE_SCN_MEM_NOT_CACHED: u32 = 0x0400_0000;
/// The section is not pageable.
pub const IMAGE_SCN_MEM_NOT_PAGED: u32 = 0x0800_0000;
/// The section can be shared in memory.
pub const IMAGE_SCN_MEM_SHARED: u32 = 0x1000_0000;
/// The section can be executed as code.
pub const IMAGE_SCN_MEM_EXECUTE: u32 = 0x2000_0000;
/// The section can be read.
pub const IMAGE_SCN_MEM_READ: u32 = 0x4000_0000;
/// The section can be written to.
pub const IMAGE_SCN_MEM_WRITE: u32 = 0x8000_0000;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn set_name_offset() {
let mut section = SectionTable::default();
for &(offset, name) in [
(0usize, b"/0\0\0\0\0\0\0"),
(1, b"/1\0\0\0\0\0\0"),
(9_999_999, b"/9999999"),
(10_000_000, b"//AAmJaA"),
#[cfg(target_pointer_width = "64")]
(0xfff_fff_fff, b"////////"),
]
.iter()
{
section.set_name_offset(offset).unwrap();
assert_eq!(&section.name, name);
assert_eq!(section.name_offset().unwrap(), Some(offset));
}
#[cfg(target_pointer_width = "64")]
assert!(section.set_name_offset(0x1_000_000_000).is_err());
}
}
| size_with | identifier_name |
mod.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CNDTR2 {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
} | #[doc = r" Value of the field"]
pub struct NDTR {
bits: u16,
}
impl NDTR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _NDTW<'a> {
w: &'a mut W,
}
impl<'a> _NDTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - Number of data to transfer"]
#[inline(always)]
pub fn ndt(&self) -> NDTR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
NDTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - Number of data to transfer"]
#[inline(always)]
pub fn ndt(&mut self) -> _NDTW {
_NDTW { w: self }
}
} | random_line_split |
|
mod.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CNDTR2 {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn | <F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct NDTR {
bits: u16,
}
impl NDTR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _NDTW<'a> {
w: &'a mut W,
}
impl<'a> _NDTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - Number of data to transfer"]
#[inline(always)]
pub fn ndt(&self) -> NDTR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
NDTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - Number of data to transfer"]
#[inline(always)]
pub fn ndt(&mut self) -> _NDTW {
_NDTW { w: self }
}
}
| write | identifier_name |
mod.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CNDTR2 {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
|
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct NDTR {
bits: u16,
}
impl NDTR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _NDTW<'a> {
w: &'a mut W,
}
impl<'a> _NDTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - Number of data to transfer"]
#[inline(always)]
pub fn ndt(&self) -> NDTR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
NDTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - Number of data to transfer"]
#[inline(always)]
pub fn ndt(&mut self) -> _NDTW {
_NDTW { w: self }
}
}
| {
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
} | identifier_body |
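// A hedged, host-runnable sketch of how the generated CNDTR2 API above is used.
// A plain Cell stands in for the real volatile hardware register, and only the
// 16-bit NDT field is modelled; everything else mirrors the listing's shape.
use std::cell::Cell;
struct Reg { register: Cell<u32> }
struct R { bits: u32 }
struct W { bits: u32 }
impl Reg {
    fn read(&self) -> R {
        R { bits: self.register.get() }
    }
    fn write<F: FnOnce(&mut W) -> &mut W>(&self, f: F) {
        let mut w = W { bits: 0 }; // reset value
        f(&mut w);
        self.register.set(w.bits);
    }
    fn modify<F: for<'w> FnOnce(&R, &'w mut W) -> &'w mut W>(&self, f: F) {
        let bits = self.register.get();
        let r = R { bits };
        let mut w = W { bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
}
impl R {
    // Bits 0:15 - number of data to transfer
    fn ndt(&self) -> u16 {
        (self.bits & 0xFFFF) as u16
    }
}
impl W {
    fn ndt(&mut self, value: u16) -> &mut W {
        self.bits = (self.bits & !0xFFFF) | value as u32;
        self
    }
}
fn main() {
    let cndtr2 = Reg { register: Cell::new(0) };
    cndtr2.write(|w| w.ndt(256));             // overwrite: set the transfer count
    cndtr2.modify(|r, w| w.ndt(r.ndt() / 2)); // read-modify-write: halve it in place
    assert_eq!(cndtr2.read().ndt(), 128);
}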
lint-ctypes.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(ctypes)]
extern crate libc;
extern {
pub fn bare_type1(size: int); //~ ERROR: found rust type
pub fn bare_type2(size: uint); //~ ERROR: found rust type
pub fn ptr_type1(size: *int); //~ ERROR: found rust type
pub fn ptr_type2(size: *uint); //~ ERROR: found rust type
pub fn good1(size: *libc::c_int);
pub fn good2(size: *libc::c_uint);
}
fn | () {
}
| main | identifier_name |
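// A hedged sketch of the fix that lint is checking for: FFI signatures should use
// C-compatible integer types (std::os::raw or the libc crate) instead of Rust's
// pointer-sized int/uint. The extern functions below are hypothetical declarations;
// calling them would need a real C library to link against.
#[allow(dead_code)]
mod ffi {
    use std::os::raw::{c_int, c_uint};
    extern "C" {
        pub fn good_bare(size: c_int);
        pub fn good_ptr(size: *const c_uint);
    }
}
fn main() {
    println!(
        "c_int is {} bytes, c_uint is {} bytes",
        std::mem::size_of::<std::os::raw::c_int>(),
        std::mem::size_of::<std::os::raw::c_uint>()
    );
}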
lint-ctypes.rs | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(ctypes)]
extern crate libc;
extern {
pub fn bare_type1(size: int); //~ ERROR: found rust type
pub fn bare_type2(size: uint); //~ ERROR: found rust type
pub fn ptr_type1(size: *int); //~ ERROR: found rust type
pub fn ptr_type2(size: *uint); //~ ERROR: found rust type
pub fn good1(size: *libc::c_int);
pub fn good2(size: *libc::c_uint);
}
fn main() {
} | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | random_line_split |
|
lint-ctypes.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(ctypes)]
extern crate libc;
extern {
pub fn bare_type1(size: int); //~ ERROR: found rust type
pub fn bare_type2(size: uint); //~ ERROR: found rust type
pub fn ptr_type1(size: *int); //~ ERROR: found rust type
pub fn ptr_type2(size: *uint); //~ ERROR: found rust type
pub fn good1(size: *libc::c_int);
pub fn good2(size: *libc::c_uint);
}
fn main() | {
} | identifier_body |
|
4_traitsbounds_mistake1.rs | /* Writing a function which adds 2 to every element of vector and a function to multiply 2
to every element of the vector */
/* NOTES: OWNERSHIP AND BORROW RULES
1. Only one owner at a time
2. Only 1 active mutable borrow at a time
3. Every other borrow after a shared borrow should be a shared borrow
*/
trait Arith:Copy{
fn add(self, b: Self) -> Self;
fn mult(self, b: Self) -> Self;
fn print(self);
}
impl Arith for i32{
fn add(self, b: i32) -> i32{
self + b
}
fn mult(self, b: Self) -> Self{
self * b
}
fn print(self) {
println!("Val = {}", self);
} | }
fn vec_add<T: Arith>(vec: &mut Vec<T>){
for e in vec.iter_mut(){
/* e is of type &mut i32. But you can give it to print() which
expects i32 because rust derefs it implicitly */
e.print();
e.add(5);
}
}
fn main(){
println!("Hello World");
let mut vec: Vec<i32> = vec![1,2,3,4,5];
vec_add(&mut vec);
}
/*
What's the mistake with e.add(5) that throws the error below? Isn't 'b: Self' of type i32 for the current example?
<anon>:35:15: 35:16 error: mismatched types:
expected `T`,
found `_`
(expected type parameter,
found integral variable) [E0308]
<anon>:35 e.add(5);
ANS: While type-checking the generic vec_add, Rust doesn't know a concrete type for 'e'. It just knows that 'e' is of type 'T' and that 'T'
implements the trait 'Arith'.
It'll just compare: T.add(5) <--> fn add(self: T, b: T), because Self is the implementor's type,
so you are comparing 5 and T here, which is wrong.
Practically, let's say you have implemented 'Arith' for 'f32'. e.add(5) cannot be correct for both i32 and f32
at the same time, because the 'f32' impl expects an 'f32' argument.
*/ | random_line_split |
|
4_traitsbounds_mistake1.rs | /* Writing a function which adds 2 to every element of vector and a function to multiply 2
to every element of the vector */
/* NOTES: OWNERSHIP AND BORROW RULES
1. Only one owner at a time
2. Only 1 active mutable borrow at a time
3. Every other borrow after a shared borrow should be a shared borrow
*/
trait Arith:Copy{
fn add(self, b: Self) -> Self;
fn mult(self, b: Self) -> Self;
fn print(self);
}
impl Arith for i32{
fn | (self, b: i32) -> i32{
self + b
}
fn mult(self, b: Self) -> Self{
self * b
}
fn print(self) {
println!("Val = {}", self);
}
}
fn vec_add<T: Arith>(vec: &mut Vec<T>){
for e in vec.iter_mut(){
/* e is of type &mut i32. But you can give it to print() which
expects i32 because rust derefs it implicitly */
e.print();
e.add(5);
}
}
fn main(){
println!("Hello World");
let mut vec: Vec<i32> = vec![1,2,3,4,5];
vec_add(&mut vec);
}
/*
What's the mistake with e.add(5) that throws the error below? Isn't 'b: Self' of type i32 for the current example?
<anon>:35:15: 35:16 error: mismatched types:
expected `T`,
found `_`
(expected type parameter,
found integral variable) [E0308]
<anon>:35 e.add(5);
ANS: While type-checking the generic vec_add, Rust doesn't know a concrete type for 'e'. It just knows that 'e' is of type 'T' and that 'T'
implements the trait 'Arith'.
It'll just compare: T.add(5) <--> fn add(self: T, b: T), because Self is the implementor's type,
so you are comparing 5 and T here, which is wrong.
Practically, let's say you have implemented 'Arith' for 'f32'. e.add(5) cannot be correct for both i32 and f32
at the same time, because the 'f32' impl expects an 'f32' argument.
*/
| add | identifier_name |
4_traitsbounds_mistake1.rs | /* Writing a function which adds 2 to every element of vector and a function to multiply 2
to every element of the vector */
/* NOTES: OWNERSHIP AND BORROW RULES
1. Only one owner at a time
2. Only 1 active mutable borrow at a time
3. Every other borrow after a shared borrow should be a shared borrow
*/
trait Arith:Copy{
fn add(self, b: Self) -> Self;
fn mult(self, b: Self) -> Self;
fn print(self);
}
impl Arith for i32{
fn add(self, b: i32) -> i32{
self + b
}
fn mult(self, b: Self) -> Self{
self * b
}
fn print(self) {
println!("Val = {}", self);
}
}
fn vec_add<T: Arith>(vec: &mut Vec<T>) |
fn main(){
println!("Hello World");
let mut vec: Vec<i32> = vec![1,2,3,4,5];
vec_add(&mut vec);
}
/*
What's the mistake with e.add(5) that throws the error below? Isn't 'b: Self' of type i32 for the current example?
<anon>:35:15: 35:16 error: mismatched types:
expected `T`,
found `_`
(expected type parameter,
found integral variable) [E0308]
<anon>:35 e.add(5);
ANS: While type-checking the generic vec_add, Rust doesn't know a concrete type for 'e'. It just knows that 'e' is of type 'T' and that 'T'
implements the trait 'Arith'.
It'll just compare: T.add(5) <--> fn add(self: T, b: T), because Self is the implementor's type,
so you are comparing 5 and T here, which is wrong.
Practically, let's say you have implemented 'Arith' for 'f32'. e.add(5) cannot be correct for both i32 and f32
at the same time, because the 'f32' impl expects an 'f32' argument.
*/
| {
for e in vec.iter_mut(){
/* e is of type &mut i32. But you can give it to print() which
expects i32 because rust derefs it implicitly */
e.print();
e.add(5);
}
} | identifier_body |
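/* Editor's sketch (added, not part of the original 4_traitsbounds_mistake1.rs rows): a minimal
   illustration of one way to act on the explanation above: pass the addend into the function as a
   value of the same generic type T, so the compiler never has to unify a bare integer literal with
   T. The helper name vec_add_n and its signature are assumptions made for illustration only. */
trait Arith: Copy {
    fn add(self, b: Self) -> Self;
    fn print(self);
}
impl Arith for i32 {
    fn add(self, b: i32) -> i32 { self + b }
    fn print(self) { println!("Val = {}", self); }
}
// Hypothetical variant of vec_add: the caller supplies the addend as a T, so inside the loop
// both operands of add() have the same type and the bound T: Arith is enough.
fn vec_add_n<T: Arith>(vec: &mut Vec<T>, n: T) {
    for e in vec.iter_mut() {
        let cur = *e;    // T: Copy (supertrait of Arith), so copy the element out of the &mut T
        *e = cur.add(n); // both cur and n are T, so this type-checks for any T: Arith
    }
}
fn main() {
    let mut vec: Vec<i32> = vec![1, 2, 3, 4, 5];
    vec_add_n(&mut vec, 5); // here T = i32, so the literal 5 is fine at the call site
    for e in vec.iter() { e.print(); }
}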
fn.rs | // Function that returns a boolean value
fn is_divisible_by(lhs: uint, rhs: uint) -> bool {
// Corner case, early return
if rhs == 0 {
return false; |
// This is an expression, the `return` keyword is not necessary here
lhs % rhs == 0
}
// Functions that "don't" return a value actually return the unit type `()`
fn fizzbuzz(n: uint) -> () {
if is_divisible_by(n, 15) {
println!("fizzbuzz");
} else if is_divisible_by(n, 3) {
println!("fizz");
} else if is_divisible_by(n, 5) {
println!("buzz");
} else {
println!("{}", n);
}
}
// When a function returns `()`, the return type can be omitted from the
// signature
fn fizzbuzz_to(n: uint) {
for n in range(1, n + 1) {
fizzbuzz(n);
}
}
fn main() {
fizzbuzz_to(100);
} | } | random_line_split |
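/* Editor's sketch (added, not part of the original fn.rs rows): the comments above make two points,
   that a block's final expression is its value and that "void" functions really return the unit
   type (). The sketch restates fizzbuzz with the if/else chain used directly as an expression and
   binds () explicitly. It is written against current Rust (u32, String), unlike the pre-1.0 listing
   above, and the name fizzbuzz_label is an assumption for illustration only. */
fn is_divisible_by(lhs: u32, rhs: u32) -> bool {
    if rhs == 0 {
        return false; // an early return still needs the keyword
    }
    lhs % rhs == 0 // final expression: no `return`, no semicolon
}
// The whole if/else chain is one expression, so it can be the function body's value.
fn fizzbuzz_label(n: u32) -> String {
    if is_divisible_by(n, 15) {
        "fizzbuzz".to_string()
    } else if is_divisible_by(n, 3) {
        "fizz".to_string()
    } else if is_divisible_by(n, 5) {
        "buzz".to_string()
    } else {
        format!("{}", n)
    }
}
fn main() {
    // println! evaluates to (), the unit type, which can be bound like any other value.
    let unit: () = println!("{}", fizzbuzz_label(15));
    let _ = unit;
}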
fn.rs | // Function that returns a boolean value
fn is_divisible_by(lhs: uint, rhs: uint) -> bool {
// Corner case, early return
if rhs == 0 {
return false;
}
// This is an expression, the `return` keyword is not necessary here
lhs % rhs == 0
}
// Functions that "don't" return a value actually return the unit type `()`
fn fizzbuzz(n: uint) -> () {
if is_divisible_by(n, 15) {
println!("fizzbuzz");
} else if is_divisible_by(n, 3) {
println!("fizz");
} else if is_divisible_by(n, 5) {
println!("buzz");
} else {
println!("{}", n);
}
}
// When a function returns `()`, the return type can be omitted from the
// signature
fn | (n: uint) {
for n in range(1, n + 1) {
fizzbuzz(n);
}
}
fn main() {
fizzbuzz_to(100);
}
| fizzbuzz_to | identifier_name |
fn.rs | // Function that returns a boolean value
fn is_divisible_by(lhs: uint, rhs: uint) -> bool {
// Corner case, early return
if rhs == 0 {
return false;
}
// This is an expression, the `return` keyword is not necessary here
lhs % rhs == 0
}
// Functions that "don't" return a value actually return the unit type `()`
fn fizzbuzz(n: uint) -> () {
if is_divisible_by(n, 15) {
println!("fizzbuzz");
} else if is_divisible_by(n, 3) {
println!("fizz");
} else if is_divisible_by(n, 5) {
println!("buzz");
} else |
}
// When a function returns `()`, the return type can be omitted from the
// signature
fn fizzbuzz_to(n: uint) {
for n in range(1, n + 1) {
fizzbuzz(n);
}
}
fn main() {
fizzbuzz_to(100);
}
| {
println!("{}", n);
} | conditional_block |
fn.rs | // Function that returns a boolean value
fn is_divisible_by(lhs: uint, rhs: uint) -> bool {
// Corner case, early return
if rhs == 0 {
return false;
}
// This is an expression, the `return` keyword is not necessary here
lhs % rhs == 0
}
// Functions that "don't" return a value actually return the unit type `()`
fn fizzbuzz(n: uint) -> () {
if is_divisible_by(n, 15) {
println!("fizzbuzz");
} else if is_divisible_by(n, 3) {
println!("fizz");
} else if is_divisible_by(n, 5) {
println!("buzz");
} else {
println!("{}", n);
}
}
// When a function returns `()`, the return type can be omitted from the
// signature
fn fizzbuzz_to(n: uint) |
fn main() {
fizzbuzz_to(100);
}
| {
for n in range(1, n + 1) {
fizzbuzz(n);
}
} | identifier_body |
cmp.rs | #![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::cmp::Ordering::{self, Less, Equal, Greater};
// macro_rules! ord_impl {
// ($($t:ty)*) => ($(
// #[stable(feature = "rust1", since = "1.0.0")]
// impl Ord for $t {
// #[inline]
// fn cmp(&self, other: &$t) -> Ordering {
// if *self < *other { Less }
// else if *self > *other { Greater }
// else { Equal }
// }
// } | // }
// ord_impl! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
macro_rules! cmp_test {
($($t:ty)*) => ($({
let v1: $t = 68 as $t;
{
let result: Ordering = v1.cmp(&v1);
assert_eq!(result, Equal);
}
let v2: $t = 100 as $t;
{
let result: Ordering = v1.cmp(&v2);
assert_eq!(result, Less);
}
{
let result: Ordering = v2.cmp(&v1);
assert_eq!(result, Greater);
}
})*)
}
#[test]
fn cmp_test1() {
cmp_test! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 };
}
} | // )*) | random_line_split |
cmp.rs | #![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::cmp::Ordering::{self, Less, Equal, Greater};
// macro_rules! ord_impl {
// ($($t:ty)*) => ($(
// #[stable(feature = "rust1", since = "1.0.0")]
// impl Ord for $t {
// #[inline]
// fn cmp(&self, other: &$t) -> Ordering {
// if *self < *other { Less }
// else if *self > *other { Greater }
// else { Equal }
// }
// }
// )*)
// }
// ord_impl! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
macro_rules! cmp_test {
($($t:ty)*) => ($({
let v1: $t = 68 as $t;
{
let result: Ordering = v1.cmp(&v1);
assert_eq!(result, Equal);
}
let v2: $t = 100 as $t;
{
let result: Ordering = v1.cmp(&v2);
assert_eq!(result, Less);
}
{
let result: Ordering = v2.cmp(&v1);
assert_eq!(result, Greater);
}
})*)
}
#[test]
fn cmp_test1() |
}
| {
cmp_test! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 };
} | identifier_body |
cmp.rs | #![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::cmp::Ordering::{self, Less, Equal, Greater};
// macro_rules! ord_impl {
// ($($t:ty)*) => ($(
// #[stable(feature = "rust1", since = "1.0.0")]
// impl Ord for $t {
// #[inline]
// fn cmp(&self, other: &$t) -> Ordering {
// if *self < *other { Less }
// else if *self > *other { Greater }
// else { Equal }
// }
// }
// )*)
// }
// ord_impl! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
macro_rules! cmp_test {
($($t:ty)*) => ($({
let v1: $t = 68 as $t;
{
let result: Ordering = v1.cmp(&v1);
assert_eq!(result, Equal);
}
let v2: $t = 100 as $t;
{
let result: Ordering = v1.cmp(&v2);
assert_eq!(result, Less);
}
{
let result: Ordering = v2.cmp(&v1);
assert_eq!(result, Greater);
}
})*)
}
#[test]
fn | () {
cmp_test! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 };
}
}
| cmp_test1 | identifier_name |
mod.rs | extern crate chrono;
extern crate chrono_tz;
extern crate clap;
extern crate ical;
use std::fs::File;
use std::io::BufReader;
use self::chrono::{DateTime, TimeZone};
use self::chrono_tz::{Tz, UTC};
use self::clap::SubCommand;
use self::ical::IcalParser;
#[derive(Debug)]
enum LineInfo {
TextInfo {
name: String,
value: Option<String>,
},
DateTimeInfo {
name: String,
value: DateTime<UTC>,
}
}
pub fn entry(m: &SubCommand) -> i32 {
let fname = m.matches.value_of("filename").unwrap();
let file = match File::open(fname) {
Ok(f) => f,
Err(f) => {
println!("{}", f);
return 1;
}
}; |
let input = IcalParser::new(BufReader::new(file));
for line in input {
let line = line.unwrap();
for evt in line.events {
println!("EVENT");
for prop in evt.properties {
let parsedval: LineInfo = match prop.value {
Some(mut v) => {
let tz = if (&v).ends_with("Z") {
v.pop();
UTC
} else {
match prop.params {
Some(params) => {
for &(ref pk, ref pv) in ¶ms {
if pk == "TZID" {
match pv[0].parse::<Tz>() {
Ok(tz) => tz,
Err(_) => UTC,
}
}
}
UTC
}
None => UTC,
}
};
match tz.datetime_from_str(&v, "%Y%m%dT%H%M%S") {
Ok(v) => LineInfo::DateTimeInfo { name: prop.name, value: v },
Err(_) => LineInfo::TextInfo { name: prop.name, value: Some(v) },
}
}
None => LineInfo::TextInfo { name: prop.name, value: None },
};
println!("{:?}", parsedval);
}
}
}
0
} | random_line_split |
|
mod.rs | extern crate chrono;
extern crate chrono_tz;
extern crate clap;
extern crate ical;
use std::fs::File;
use std::io::BufReader;
use self::chrono::{DateTime, TimeZone};
use self::chrono_tz::{Tz, UTC};
use self::clap::SubCommand;
use self::ical::IcalParser;
#[derive(Debug)]
enum | {
TextInfo {
name: String,
value: Option<String>,
},
DateTimeInfo {
name: String,
value: DateTime<UTC>,
}
}
pub fn entry(m: &SubCommand) -> i32 {
let fname = m.matches.value_of("filename").unwrap();
let file = match File::open(fname) {
Ok(f) => f,
Err(f) => {
println!("{}", f);
return 1;
}
};
let input = IcalParser::new(BufReader::new(file));
for line in input {
let line = line.unwrap();
for evt in line.events {
println!("EVENT");
for prop in evt.properties {
let parsedval: LineInfo = match prop.value {
Some(mut v) => {
let tz = if (&v).ends_with("Z") {
v.pop();
UTC
} else {
match prop.params {
Some(params) => {
for &(ref pk, ref pv) in ¶ms {
if pk == "TZID" {
match pv[0].parse::<Tz>() {
Ok(tz) => tz,
Err(_) => UTC,
}
}
}
UTC
}
None => UTC,
}
};
match tz.datetime_from_str(&v, "%Y%m%dT%H%M%S") {
Ok(v) => LineInfo::DateTimeInfo { name: prop.name, value: v },
Err(_) => LineInfo::TextInfo { name: prop.name, value: Some(v) },
}
}
None => LineInfo::TextInfo { name: prop.name, value: None },
};
println!("{:?}", parsedval);
}
}
}
0
}
| LineInfo | identifier_name |
tester.rs | use std::comm;
use std::fmt::Show;
use std::io::ChanWriter;
use std::iter;
use std::rand;
use std::task::TaskBuilder;
use super::{Arbitrary, Gen, Shrinker, StdGen};
use tester::trap::safe;
use tester::Status::{Discard, Fail, Pass};
/// The main QuickCheck type for setting configuration and running QuickCheck.
pub struct QuickCheck<G> {
tests: uint,
max_tests: uint,
gen: G,
}
impl QuickCheck<StdGen<rand::TaskRng>> {
/// Creates a new QuickCheck value.
///
/// This can be used to run QuickCheck on things that implement
/// `Testable`. You may also adjust the configuration, such as
/// the number of tests to run.
///
/// By default, the maximum number of passed tests is set to `100`,
/// the max number of overall tests is set to `10000` and the generator
/// is set to a `StdGen` with a default size of `100`.
pub fn new() -> QuickCheck<StdGen<rand::TaskRng>> {
QuickCheck {
tests: 100,
max_tests: 10000,
gen: StdGen::new(rand::task_rng(), 100),
}
}
}
impl<G: Gen> QuickCheck<G> {
/// Set the number of tests to run.
///
/// This actually refers to the maximum number of *passed* tests that
/// can occur. Namely, if a test causes a failure, future testing on that
/// property stops. Additionally, if tests are discarded, there may be
/// fewer than `tests` passed.
pub fn tests(mut self, tests: uint) -> QuickCheck<G> {
self.tests = tests;
self
}
/// Set the maximum number of tests to run.
///
/// The number of invocations of a property will never exceed this number.
/// This is necessary to cap the number of tests because QuickCheck
/// properties can discard tests.
pub fn max_tests(mut self, max_tests: uint) -> QuickCheck<G> {
self.max_tests = max_tests;
self
}
/// Set the random number generator to be used by QuickCheck.
pub fn gen(mut self, gen: G) -> QuickCheck<G> {
self.gen = gen;
self
}
/// Tests a property and returns the result.
///
/// The result returned is either the number of tests passed or a witness
/// of failure.
///
/// (If you're using Rust's unit testing infrastructure, then you'll
/// want to use the `quickcheck` method, which will `panic!` on failure.)
pub fn quicktest<A>(&mut self, f: A) -> Result<uint, TestResult>
where A: Testable {
let mut ntests: uint = 0;
for _ in iter::range(0, self.max_tests) {
if ntests >= self.tests {
break
}
let r = f.result(&mut self.gen);
match r.status {
Pass => ntests += 1,
Discard => continue,
Fail => return Err(r),
}
}
Ok(ntests)
}
/// Tests a property and calls `panic!` on failure.
///
/// The `panic!` message will include a (hopefully) minimal witness of
/// failure.
///
/// It is appropriate to use this method with Rust's unit testing
/// infrastructure.
///
/// Note that if the environment variable `RUST_LOG` is set to enable
/// `info` level log messages for the `quickcheck` crate, then this will
/// include output on how many QuickCheck tests were passed.
///
/// # Example
///
/// ```rust
/// use quickcheck::QuickCheck;
///
/// fn prop_reverse_reverse() {
/// fn revrev(xs: Vec<uint>) -> bool {
/// let rev: Vec<uint> = xs.clone().into_iter().rev().collect();
/// let revrev = rev.into_iter().rev().collect();
/// xs == revrev
/// }
/// QuickCheck::new().quickcheck(revrev);
/// }
/// ```
pub fn quickcheck<A>(&mut self, f: A) where A: Testable {
match self.quicktest(f) {
Ok(ntests) => info!("(Passed {} QuickCheck tests.)", ntests),
Err(result) => panic!(result.failed_msg()),
}
}
}
/// Convenience function for running QuickCheck.
///
/// This is an alias for `QuickCheck::new().quickcheck(f)`.
pub fn quickcheck<A: Testable>(f: A) { QuickCheck::new().quickcheck(f) }
/// Describes the status of a single instance of a test.
///
/// All testable things must be capable of producing a `TestResult`.
#[deriving(Clone, Show)]
pub struct TestResult {
status: Status,
arguments: Vec<String>,
err: String,
}
/// Whether a test has passed, failed or been discarded.
#[deriving(Clone, Show)]
enum | { Pass, Fail, Discard }
impl TestResult {
/// Produces a test result that indicates the current test has passed.
pub fn passed() -> TestResult { TestResult::from_bool(true) }
/// Produces a test result that indicates the current test has failed.
pub fn failed() -> TestResult { TestResult::from_bool(false) }
/// Produces a test result that indicates failure from a runtime
/// error.
pub fn error(msg: &str) -> TestResult {
let mut r = TestResult::from_bool(false);
r.err = msg.to_string();
r
}
/// Produces a test result that instructs `quickcheck` to ignore it.
/// This is useful for restricting the domain of your properties.
/// When a test is discarded, `quickcheck` will replace it with a
/// fresh one (up to a certain limit).
pub fn discard() -> TestResult {
TestResult {
status: Discard,
arguments: vec![],
err: "".to_string(),
}
}
/// Converts a `bool` to a `TestResult`. A `true` value indicates that
/// the test has passed and a `false` value indicates that the test
/// has failed.
pub fn from_bool(b: bool) -> TestResult {
TestResult {
status: if b { Pass } else { Fail },
arguments: vec![],
err: "".to_string(),
}
}
/// Tests if a "procedure" fails when executed. The test passes only if
/// `f` generates a task failure during its execution.
pub fn must_fail<T: Send>(f: proc(): Send -> T) -> TestResult {
let (tx, _) = comm::channel();
TestResult::from_bool(
TaskBuilder::new()
.stdout(box ChanWriter::new(tx.clone()))
.stderr(box ChanWriter::new(tx))
.try(f)
.is_err())
}
/// Returns `true` if and only if this test result describes a failing
/// test.
pub fn is_failure(&self) -> bool {
match self.status {
Fail => true,
Pass|Discard => false,
}
}
/// Returns `true` if and only if this test result describes a failing
/// test as a result of a run time error.
pub fn is_error(&self) -> bool {
self.is_failure() && self.err.len() > 0
}
fn failed_msg(&self) -> String {
if self.err.len() == 0 {
format!(
"[quickcheck] TEST FAILED. Arguments: ({})",
self.arguments.connect(", "))
} else {
format!(
"[quickcheck] TEST FAILED (runtime error). \
Arguments: ({})\nError: {}",
self.arguments.connect(", "), self.err)
}
}
}
/// `Testable` describes types (e.g., a function) whose values can be
/// tested.
///
/// Anything that can be tested must be capable of producing a `TestResult`
/// given a random number generator. This is trivial for types like `bool`,
/// which are just converted to either a passing or failing test result.
///
/// For functions, an implementation must generate random arguments
/// and potentially shrink those arguments if they produce a failure.
///
/// It's unlikely that you'll have to implement this trait yourself.
/// This comes with a caveat: currently, only functions with 4 parameters
/// or fewer (both `fn` and `||` types) satisfy `Testable`. If you have
/// functions to test with more than 4 parameters, please
/// [file a bug](https://github.com/BurntSushi/quickcheck/issues) and
/// I'll hopefully add it. (As of now, it would be very difficult to
/// add your own implementation outside of `quickcheck`, since the
/// functions that do shrinking are not public.)
pub trait Testable : Send {
fn result<G: Gen>(&self, &mut G) -> TestResult;
}
impl Testable for bool {
fn result<G: Gen>(&self, _: &mut G) -> TestResult {
TestResult::from_bool(*self)
}
}
impl Testable for TestResult {
fn result<G: Gen>(&self, _: &mut G) -> TestResult { self.clone() }
}
impl<A> Testable for Result<A, String> where A: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
match *self {
Ok(ref r) => r.result(g),
Err(ref err) => TestResult::error(err.as_slice()),
}
}
}
impl<T> Testable for fn() -> T where T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, (), (), (), (), fn() -> T>(g, self)
}
}
impl<A, T> Testable for fn(A) -> T where A: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, (), (), (), fn(A) -> T>(g, self)
}
}
impl<A, B, T> Testable for fn(A, B) -> T
where A: AShow, B: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, (), (), fn(A, B) -> T>(g, self)
}
}
impl<A, B, C, T> Testable for fn(A, B, C) -> T
where A: AShow, B: AShow, C: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, C, (), fn(A, B, C) -> T>(g, self)
}
}
impl<A, B, C, D, T,> Testable for fn(A, B, C, D) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, C, D, fn(A, B, C, D) -> T>(g, self)
}
}
trait Fun<A, B, C, D, T> {
fn call<G>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, d: Option<&D>)
-> TestResult
where G: Gen;
}
macro_rules! impl_fun_call(
($f:expr, $g:expr, $($name:ident,)+) => ({
let ($($name,)*) = ($($name.unwrap(),)*);
let f = $f;
let mut r = {
let ($($name,)*) = ($(box $name.clone(),)*);
safe(proc() { f($(*$name,)*) }).result($g)
};
if r.is_failure() {
r.arguments = vec![$($name.to_string(),)*];
}
r
});
)
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn() -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
_: Option<&A>, _: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
let f = *self;
safe(proc() { f() }).result(g)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, _: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B, C) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b, c,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B, C, D) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, d: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b, c, d,)
}
}
fn shrink<G, T, A, B, C, D, F>(g: &mut G, fun: &F) -> TestResult
where G: Gen, T: Testable, A: AShow, B: AShow, C: AShow, D: AShow,
F: Fun<A, B, C, D, T> {
let (a, b, c, d): (A, B, C, D) = arby(g);
let r = fun.call(g, Some(&a), Some(&b), Some(&c), Some(&d));
match r.status {
Pass|Discard => r,
Fail => shrink_failure(g, (a, b, c, d).shrink(), fun).unwrap_or(r),
}
}
fn shrink_failure<G, T, A, B, C, D, F>
(g: &mut G,
mut shrinker: Box<Shrinker<(A, B, C, D)>+'static>,
fun: &F)
-> Option<TestResult>
where G: Gen, T: Testable, A: AShow, B: AShow, C: AShow, D: AShow,
F: Fun<A, B, C, D, T> {
for (a, b, c, d) in shrinker {
let r = fun.call(g, Some(&a), Some(&b), Some(&c), Some(&d));
match r.status {
// The shrunk value does not witness a failure, so
// throw it away.
Pass|Discard => continue,
// The shrunk value *does* witness a failure, so keep trying
// to shrink it.
Fail => {
let shrunk = shrink_failure(g, (a, b, c, d).shrink(), fun);
// If we couldn't witness a failure on any shrunk value,
// then return the failure we already have.
return Some(shrunk.unwrap_or(r))
},
}
}
None
}
#[cfg(quickfail)]
mod trap {
pub fn safe<T: Send>(fun: proc() -> T) -> Result<T, String> {
Ok(fun())
}
}
#[cfg(not(quickfail))]
mod trap {
use std::comm::channel;
use std::io::{ChanReader, ChanWriter};
use std::task::TaskBuilder;
// This is my bright idea for capturing runtime errors caused by a
// test. Actually, it looks like rustc uses a similar approach.
// The problem is, this is used for *each* test case passed to a
// property, whereas rustc does it once for each test.
//
// I'm not entirely sure there's much of an alternative either.
// We could launch a single task and pass arguments over a channel,
// but the task would need to be restarted if it failed due to a
// runtime error. Since these are rare, it'd probably be more efficient
// than this approach, but it would also be more complex.
//
// Moreover, this feature seems to prevent an implementation of
// Testable for a stack closure type. *sigh*
pub fn safe<T: Send>(fun: proc():Send -> T) -> Result<T, String> {
let (send, recv) = channel();
let stdout = ChanWriter::new(send.clone());
let stderr = ChanWriter::new(send);
let mut reader = ChanReader::new(recv);
let t = TaskBuilder::new()
.named("safefn")
.stdout(box stdout)
.stderr(box stderr);
match t.try(fun) {
Ok(v) => Ok(v),
Err(_) => {
let s = reader.read_to_string().unwrap();
Err(s.as_slice().trim().into_string())
}
}
}
}
/// Convenient aliases.
trait AShow : Arbitrary + Show {}
impl<A: Arbitrary + Show> AShow for A {}
fn arby<A: Arbitrary, G: Gen>(g: &mut G) -> A { Arbitrary::arbitrary(g) }
| Status | identifier_name |
tester.rs | use std::comm;
use std::fmt::Show;
use std::io::ChanWriter;
use std::iter;
use std::rand;
use std::task::TaskBuilder;
use super::{Arbitrary, Gen, Shrinker, StdGen};
use tester::trap::safe;
use tester::Status::{Discard, Fail, Pass};
/// The main QuickCheck type for setting configuration and running QuickCheck.
pub struct QuickCheck<G> {
tests: uint,
max_tests: uint,
gen: G,
}
impl QuickCheck<StdGen<rand::TaskRng>> {
/// Creates a new QuickCheck value.
///
/// This can be used to run QuickCheck on things that implement
/// `Testable`. You may also adjust the configuration, such as
/// the number of tests to run.
///
/// By default, the maximum number of passed tests is set to `100`,
/// the max number of overall tests is set to `10000` and the generator
/// is set to a `StdGen` with a default size of `100`.
pub fn new() -> QuickCheck<StdGen<rand::TaskRng>> {
QuickCheck {
tests: 100,
max_tests: 10000,
gen: StdGen::new(rand::task_rng(), 100),
}
}
}
impl<G: Gen> QuickCheck<G> {
/// Set the number of tests to run.
///
/// This actually refers to the maximum number of *passed* tests that
/// can occur. Namely, if a test causes a failure, future testing on that
/// property stops. Additionally, if tests are discarded, there may be
/// fewer than `tests` passed.
pub fn tests(mut self, tests: uint) -> QuickCheck<G> {
self.tests = tests;
self
}
/// Set the maximum number of tests to run.
///
/// The number of invocations of a property will never exceed this number.
/// This is necessary to cap the number of tests because QuickCheck
/// properties can discard tests.
pub fn max_tests(mut self, max_tests: uint) -> QuickCheck<G> {
self.max_tests = max_tests;
self
}
/// Set the random number generator to be used by QuickCheck.
pub fn gen(mut self, gen: G) -> QuickCheck<G> {
self.gen = gen;
self
}
/// Tests a property and returns the result.
///
/// The result returned is either the number of tests passed or a witness
/// of failure.
///
/// (If you're using Rust's unit testing infrastructure, then you'll
/// want to use the `quickcheck` method, which will `panic!` on failure.)
pub fn quicktest<A>(&mut self, f: A) -> Result<uint, TestResult>
where A: Testable {
let mut ntests: uint = 0;
for _ in iter::range(0, self.max_tests) {
if ntests >= self.tests {
break
}
let r = f.result(&mut self.gen);
match r.status {
Pass => ntests += 1,
Discard => continue,
Fail => return Err(r),
}
}
Ok(ntests)
}
/// Tests a property and calls `panic!` on failure.
///
/// The `panic!` message will include a (hopefully) minimal witness of
/// failure.
///
/// It is appropriate to use this method with Rust's unit testing
/// infrastructure.
///
/// Note that if the environment variable `RUST_LOG` is set to enable
/// `info` level log messages for the `quickcheck` crate, then this will
/// include output on how many QuickCheck tests were passed.
///
/// # Example
///
/// ```rust
/// use quickcheck::QuickCheck;
///
/// fn prop_reverse_reverse() {
/// fn revrev(xs: Vec<uint>) -> bool {
/// let rev: Vec<uint> = xs.clone().into_iter().rev().collect();
/// let revrev = rev.into_iter().rev().collect();
/// xs == revrev
/// }
/// QuickCheck::new().quickcheck(revrev);
/// }
/// ```
pub fn quickcheck<A>(&mut self, f: A) where A: Testable {
match self.quicktest(f) {
Ok(ntests) => info!("(Passed {} QuickCheck tests.)", ntests),
Err(result) => panic!(result.failed_msg()),
}
}
}
/// Convenience function for running QuickCheck.
///
/// This is an alias for `QuickCheck::new().quickcheck(f)`.
pub fn quickcheck<A: Testable>(f: A) { QuickCheck::new().quickcheck(f) }
/// Describes the status of a single instance of a test.
///
/// All testable things must be capable of producing a `TestResult`.
#[deriving(Clone, Show)]
pub struct TestResult {
status: Status,
arguments: Vec<String>,
err: String,
}
/// Whether a test has passed, failed or been discarded.
#[deriving(Clone, Show)]
enum Status { Pass, Fail, Discard }
impl TestResult {
/// Produces a test result that indicates the current test has passed.
pub fn passed() -> TestResult { TestResult::from_bool(true) }
/// Produces a test result that indicates the current test has failed.
pub fn failed() -> TestResult { TestResult::from_bool(false) }
/// Produces a test result that indicates failure from a runtime
/// error.
pub fn error(msg: &str) -> TestResult {
let mut r = TestResult::from_bool(false);
r.err = msg.to_string();
r
}
/// Produces a test result that instructs `quickcheck` to ignore it.
/// This is useful for restricting the domain of your properties.
/// When a test is discarded, `quickcheck` will replace it with a
/// fresh one (up to a certain limit).
pub fn discard() -> TestResult {
TestResult {
status: Discard,
arguments: vec![],
err: "".to_string(),
}
}
/// Converts a `bool` to a `TestResult`. A `true` value indicates that
/// the test has passed and a `false` value indicates that the test
/// has failed.
pub fn from_bool(b: bool) -> TestResult {
TestResult {
status: if b { Pass } else { Fail },
arguments: vec![],
err: "".to_string(),
}
}
/// Tests if a "procedure" fails when executed. The test passes only if
/// `f` generates a task failure during its execution.
pub fn must_fail<T: Send>(f: proc(): Send -> T) -> TestResult {
let (tx, _) = comm::channel();
TestResult::from_bool(
TaskBuilder::new()
.stdout(box ChanWriter::new(tx.clone()))
.stderr(box ChanWriter::new(tx))
.try(f)
.is_err())
}
/// Returns `true` if and only if this test result describes a failing
/// test.
pub fn is_failure(&self) -> bool {
match self.status {
Fail => true,
Pass|Discard => false,
}
}
/// Returns `true` if and only if this test result describes a failing
/// test as a result of a run time error.
pub fn is_error(&self) -> bool {
self.is_failure() && self.err.len() > 0
}
fn failed_msg(&self) -> String {
if self.err.len() == 0 {
format!(
"[quickcheck] TEST FAILED. Arguments: ({})",
self.arguments.connect(", "))
} else {
format!(
"[quickcheck] TEST FAILED (runtime error). \
Arguments: ({})\nError: {}",
self.arguments.connect(", "), self.err)
}
}
}
/// `Testable` describes types (e.g., a function) whose values can be
/// tested.
///
/// Anything that can be tested must be capable of producing a `TestResult`
/// given a random number generator. This is trivial for types like `bool`,
/// which are just converted to either a passing or failing test result.
///
/// For functions, an implementation must generate random arguments
/// and potentially shrink those arguments if they produce a failure.
///
/// It's unlikely that you'll have to implement this trait yourself.
/// This comes with a caveat: currently, only functions with 4 parameters
/// or fewer (both `fn` and `||` types) satisfy `Testable`. If you have
/// functions to test with more than 4 parameters, please
/// [file a bug](https://github.com/BurntSushi/quickcheck/issues) and
/// I'll hopefully add it. (As of now, it would be very difficult to
/// add your own implementation outside of `quickcheck`, since the
/// functions that do shrinking are not public.)
pub trait Testable : Send {
fn result<G: Gen>(&self, &mut G) -> TestResult;
}
impl Testable for bool {
fn result<G: Gen>(&self, _: &mut G) -> TestResult {
TestResult::from_bool(*self)
}
}
impl Testable for TestResult {
fn result<G: Gen>(&self, _: &mut G) -> TestResult { self.clone() }
}
impl<A> Testable for Result<A, String> where A: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
match *self {
Ok(ref r) => r.result(g),
Err(ref err) => TestResult::error(err.as_slice()),
}
}
}
impl<T> Testable for fn() -> T where T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, (), (), (), (), fn() -> T>(g, self)
}
}
impl<A, T> Testable for fn(A) -> T where A: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, (), (), (), fn(A) -> T>(g, self)
}
}
impl<A, B, T> Testable for fn(A, B) -> T
where A: AShow, B: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, (), (), fn(A, B) -> T>(g, self)
}
}
impl<A, B, C, T> Testable for fn(A, B, C) -> T
where A: AShow, B: AShow, C: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, C, (), fn(A, B, C) -> T>(g, self)
}
}
impl<A, B, C, D, T,> Testable for fn(A, B, C, D) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, C, D, fn(A, B, C, D) -> T>(g, self)
}
}
trait Fun<A, B, C, D, T> {
fn call<G>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, d: Option<&D>)
-> TestResult
where G: Gen;
}
macro_rules! impl_fun_call(
($f:expr, $g:expr, $($name:ident,)+) => ({
let ($($name,)*) = ($($name.unwrap(),)*);
let f = $f;
let mut r = {
let ($($name,)*) = ($(box $name.clone(),)*);
safe(proc() { f($(*$name,)*) }).result($g)
};
if r.is_failure() {
r.arguments = vec![$($name.to_string(),)*];
}
r
});
)
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn() -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
_: Option<&A>, _: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult |
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, _: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B, C) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b, c,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B, C, D) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, d: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b, c, d,)
}
}
fn shrink<G, T, A, B, C, D, F>(g: &mut G, fun: &F) -> TestResult
where G: Gen, T: Testable, A: AShow, B: AShow, C: AShow, D: AShow,
F: Fun<A, B, C, D, T> {
let (a, b, c, d): (A, B, C, D) = arby(g);
let r = fun.call(g, Some(&a), Some(&b), Some(&c), Some(&d));
match r.status {
Pass|Discard => r,
Fail => shrink_failure(g, (a, b, c, d).shrink(), fun).unwrap_or(r),
}
}
fn shrink_failure<G, T, A, B, C, D, F>
(g: &mut G,
mut shrinker: Box<Shrinker<(A, B, C, D)>+'static>,
fun: &F)
-> Option<TestResult>
where G: Gen, T: Testable, A: AShow, B: AShow, C: AShow, D: AShow,
F: Fun<A, B, C, D, T> {
for (a, b, c, d) in shrinker {
let r = fun.call(g, Some(&a), Some(&b), Some(&c), Some(&d));
match r.status {
// The shrunk value does not witness a failure, so
// throw it away.
Pass|Discard => continue,
// The shrunk value *does* witness a failure, so keep trying
// to shrink it.
Fail => {
let shrunk = shrink_failure(g, (a, b, c, d).shrink(), fun);
// If we couldn't witness a failure on any shrunk value,
// then return the failure we already have.
return Some(shrunk.unwrap_or(r))
},
}
}
None
}
#[cfg(quickfail)]
mod trap {
pub fn safe<T: Send>(fun: proc() -> T) -> Result<T, String> {
Ok(fun())
}
}
#[cfg(not(quickfail))]
mod trap {
use std::comm::channel;
use std::io::{ChanReader, ChanWriter};
use std::task::TaskBuilder;
// This is my bright idea for capturing runtime errors caused by a
// test. Actually, it looks like rustc uses a similar approach.
// The problem is, this is used for *each* test case passed to a
// property, whereas rustc does it once for each test.
//
// I'm not entirely sure there's much of an alternative either.
// We could launch a single task and pass arguments over a channel,
// but the task would need to be restarted if it failed due to a
// runtime error. Since these are rare, it'd probably be more efficient
// than this approach, but it would also be more complex.
//
// Moreover, this feature seems to prevent an implementation of
// Testable for a stack closure type. *sigh*
pub fn safe<T: Send>(fun: proc():Send -> T) -> Result<T, String> {
let (send, recv) = channel();
let stdout = ChanWriter::new(send.clone());
let stderr = ChanWriter::new(send);
let mut reader = ChanReader::new(recv);
let t = TaskBuilder::new()
.named("safefn")
.stdout(box stdout)
.stderr(box stderr);
match t.try(fun) {
Ok(v) => Ok(v),
Err(_) => {
let s = reader.read_to_string().unwrap();
Err(s.as_slice().trim().into_string())
}
}
}
}
/// Convenient aliases.
trait AShow : Arbitrary + Show {}
impl<A: Arbitrary + Show> AShow for A {}
fn arby<A: Arbitrary, G: Gen>(g: &mut G) -> A { Arbitrary::arbitrary(g) }
| {
let f = *self;
safe(proc() { f() }).result(g)
} | identifier_body |
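/* Editor's sketch (added, not part of the original tester.rs rows): a small usage example in the
   same pre-1.0 dialect as the listing (uint, plain fn items). It exercises only the API shown
   above: the tests/max_tests builder methods, a bool property, and TestResult::discard to restrict
   a property's domain. The property names are invented for illustration, and it assumes QuickCheck
   and TestResult are re-exported at the crate root, as the doc comment's
   `use quickcheck::QuickCheck;` suggests. */
extern crate quickcheck;
use quickcheck::{QuickCheck, TestResult};
// A plain bool property: reversing a vector preserves its length.
fn prop_rev_len(xs: Vec<uint>) -> bool {
    let rev: Vec<uint> = xs.clone().into_iter().rev().collect();
    rev.len() == xs.len()
}
// A TestResult property: only meaningful when the divisor is non-zero, so discard the rest.
fn prop_mod_smaller(n: uint, d: uint) -> TestResult {
    if d == 0 {
        return TestResult::discard();
    }
    TestResult::from_bool(n % d < d)
}
fn main() {
    // Stop after 200 passing cases, but never run more than 5000 cases overall.
    QuickCheck::new().tests(200).max_tests(5000).quickcheck(prop_rev_len);
    QuickCheck::new().quickcheck(prop_mod_smaller);
}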