file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
fat_type.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Loaded representation for runtime types.
use diem_types::{account_address::AccountAddress, vm_status::StatusCode};
use move_core_types::{
identifier::Identifier,
language_storage::{StructTag, TypeTag},
value::{MoveStructLayout, MoveTypeLayout},
};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::convert::TryInto;
use vm::{
errors::{PartialVMError, PartialVMResult},
file_format::AbilitySet,
};
#[derive(Debug, Clone, Copy)]
pub(crate) struct WrappedAbilitySet(pub AbilitySet);
impl Serialize for WrappedAbilitySet {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.0.into_u8().serialize(serializer)
}
}
impl<'de> Deserialize<'de> for WrappedAbilitySet {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let byte = u8::deserialize(deserializer)?;
Ok(WrappedAbilitySet(AbilitySet::from_u8(byte).ok_or_else(
|| serde::de::Error::custom(format!("Invalid ability set: {:X}", byte)),
)?))
}
}
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub abilities: WrappedAbilitySet,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn subst(&self, ty_args: &[FatType]) -> PartialVMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
abilities: self.abilities,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<PartialVMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<PartialVMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> PartialVMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<PartialVMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> PartialVMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(
PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)),
);
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> PartialVMResult<TypeTag> {
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
Reference(_) | MutableReference(_) | TyParam(_) => {
return Err(
PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", self)),
)
}
};
Ok(res)
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = PartialVMError;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<PartialVMResult<Vec<_>>>()?,
))
}
}
impl TryInto<MoveTypeLayout> for &FatType {
type Error = PartialVMError;
fn try_into(self) -> Result<MoveTypeLayout, Self::Error> |
}
| {
Ok(match self {
FatType::Address => MoveTypeLayout::Address,
FatType::U8 => MoveTypeLayout::U8,
FatType::U64 => MoveTypeLayout::U64,
FatType::U128 => MoveTypeLayout::U128,
FatType::Bool => MoveTypeLayout::Bool,
FatType::Vector(v) => MoveTypeLayout::Vector(Box::new(v.as_ref().try_into()?)),
FatType::Struct(s) => MoveTypeLayout::Struct(MoveStructLayout::new(
s.layout
.iter()
.map(|ty| ty.try_into())
.collect::<PartialVMResult<Vec<_>>>()?,
)),
FatType::Signer => MoveTypeLayout::Signer,
_ => return Err(PartialVMError::new(StatusCode::ABORT_TYPE_MISMATCH_ERROR)),
})
} | identifier_body |
fat_type.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Loaded representation for runtime types.
use diem_types::{account_address::AccountAddress, vm_status::StatusCode};
use move_core_types::{
identifier::Identifier,
language_storage::{StructTag, TypeTag},
value::{MoveStructLayout, MoveTypeLayout},
};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::convert::TryInto;
use vm::{
errors::{PartialVMError, PartialVMResult},
file_format::AbilitySet,
};
#[derive(Debug, Clone, Copy)]
pub(crate) struct WrappedAbilitySet(pub AbilitySet);
impl Serialize for WrappedAbilitySet {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.0.into_u8().serialize(serializer)
}
}
impl<'de> Deserialize<'de> for WrappedAbilitySet {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let byte = u8::deserialize(deserializer)?;
Ok(WrappedAbilitySet(AbilitySet::from_u8(byte).ok_or_else(
|| serde::de::Error::custom(format!("Invalid ability set: {:X}", byte)),
)?))
}
}
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub abilities: WrappedAbilitySet,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn | (&self, ty_args: &[FatType]) -> PartialVMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
abilities: self.abilities,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<PartialVMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<PartialVMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> PartialVMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<PartialVMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> PartialVMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(
PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)),
);
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> PartialVMResult<TypeTag> {
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
Reference(_) | MutableReference(_) | TyParam(_) => {
return Err(
PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", self)),
)
}
};
Ok(res)
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = PartialVMError;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<PartialVMResult<Vec<_>>>()?,
))
}
}
impl TryInto<MoveTypeLayout> for &FatType {
type Error = PartialVMError;
fn try_into(self) -> Result<MoveTypeLayout, Self::Error> {
Ok(match self {
FatType::Address => MoveTypeLayout::Address,
FatType::U8 => MoveTypeLayout::U8,
FatType::U64 => MoveTypeLayout::U64,
FatType::U128 => MoveTypeLayout::U128,
FatType::Bool => MoveTypeLayout::Bool,
FatType::Vector(v) => MoveTypeLayout::Vector(Box::new(v.as_ref().try_into()?)),
FatType::Struct(s) => MoveTypeLayout::Struct(MoveStructLayout::new(
s.layout
.iter()
.map(|ty| ty.try_into())
.collect::<PartialVMResult<Vec<_>>>()?,
)),
FatType::Signer => MoveTypeLayout::Signer,
_ => return Err(PartialVMError::new(StatusCode::ABORT_TYPE_MISMATCH_ERROR)),
})
}
}
| subst | identifier_name |
job_queue.rs | use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::sync::TaskPool;
use std::sync::mpsc::{channel, Sender, Receiver};
use term::color::YELLOW;
use core::{Package, PackageId, Resolve, PackageSet};
use util::{Config, DependencyQueue, Fresh, Dirty, Freshness};
use util::{CargoResult, Dependency, profile};
use super::job::Job;
/// A management structure of the entire dependency graph to compile.
///
/// This structure is backed by the `DependencyQueue` type and manages the
/// actual compilation step of each package. Packages enqueue units of work and
/// then later on the entire graph is processed and compiled.
pub struct JobQueue<'a, 'b> {
pool: TaskPool,
queue: DependencyQueue<(&'a PackageId, Stage),
(&'a Package, Vec<(Job, Freshness)>)>,
tx: Sender<Message>,
rx: Receiver<Message>,
resolve: &'a Resolve,
packages: &'a PackageSet,
active: u32,
pending: HashMap<(&'a PackageId, Stage), PendingBuild>,
state: HashMap<&'a PackageId, Freshness>,
ignored: HashSet<&'a PackageId>,
printed: HashSet<&'a PackageId>,
}
/// A helper structure for metadata about the state of a building package.
struct PendingBuild {
/// Number of jobs currently active
amt: u32,
/// Current freshness state of this package. Any dirty target within a
/// package will cause the entire package to become dirty.
fresh: Freshness,
}
/// Current stage of compilation for an individual package.
///
/// This is the second layer of keys on the dependency queue to track the state
/// of where a particular package is in the compilation pipeline. Each of these
/// stages has a network of dependencies among them, outlined by the
/// `Dependency` implementation found below.
///
/// Each build step for a package is registered with one of these stages, and
/// each stage has a vector of work to perform in parallel.
#[derive(Hash, PartialEq, Eq, Clone, PartialOrd, Ord, Show, Copy)]
pub enum Stage {
Start,
BuildCustomBuild,
RunCustomBuild,
Libraries,
Binaries,
LibraryTests,
BinaryTests,
}
type Message = (PackageId, Stage, Freshness, CargoResult<()>);
impl<'a, 'b> JobQueue<'a, 'b> {
pub fn new(resolve: &'a Resolve, packages: &'a PackageSet,
config: &Config) -> JobQueue<'a, 'b> {
let (tx, rx) = channel();
JobQueue {
pool: TaskPool::new(config.jobs() as usize),
queue: DependencyQueue::new(),
tx: tx,
rx: rx,
resolve: resolve,
packages: packages,
active: 0,
pending: HashMap::new(),
state: HashMap::new(),
ignored: HashSet::new(),
printed: HashSet::new(),
}
}
pub fn enqueue(&mut self, pkg: &'a Package, stage: Stage,
jobs: Vec<(Job, Freshness)>) {
// Record the freshness state of this package as dirty if any job is
// dirty or fresh otherwise
let fresh = jobs.iter().fold(Fresh, |f1, &(_, f2)| f1.combine(f2));
match self.state.entry(pkg.get_package_id()) {
Occupied(mut entry) => { *entry.get_mut() = entry.get().combine(fresh); }
Vacant(entry) => { entry.insert(fresh); }
};
// Add the package to the dependency graph
self.queue.enqueue(&(self.resolve, self.packages), Fresh,
(pkg.get_package_id(), stage),
(pkg, jobs));
}
pub fn ignore(&mut self, pkg: &'a Package) {
self.ignored.insert(pkg.get_package_id());
}
/// Execute all jobs necessary to build the dependency graph.
///
/// This function will spawn off `config.jobs()` workers to build all of the
/// necessary dependencies, in order. Freshness is propagated as far as
/// possible along each dependency chain.
pub fn execute(&mut self, config: &Config) -> CargoResult<()> {
let _p = profile::start("executing the job graph");
// Iteratively execute the dependency graph. Each turn of this loop will
// schedule as much work as possible and then wait for one job to finish,
// possibly scheduling more work afterwards.
while self.queue.len() > 0 {
loop {
match self.queue.dequeue() {
Some((fresh, (_, stage), (pkg, jobs))) => {
info!("start: {} {:?}", pkg, stage);
try!(self.run(pkg, stage, fresh, jobs, config));
}
None => break,
}
}
// Now that all possible work has been scheduled, wait for a piece
// of work to finish. If any package fails to build then we stop
// scheduling work as quickly as possibly.
let (id, stage, fresh, result) = self.rx.recv().unwrap();
info!(" end: {} {:?}", id, stage);
let id = *self.state.keys().find(|&k| *k == &id).unwrap();
self.active -= 1;
match result {
Ok(()) => {
let state = &mut self.pending[(id, stage)];
state.amt -= 1;
state.fresh = state.fresh.combine(fresh);
if state.amt == 0 {
self.queue.finish(&(id, stage), state.fresh);
}
}
Err(e) => {
if self.active > 0 {
try!(config.shell().say(
"Build failed, waiting for other \
jobs to finish...", YELLOW)); | }
return Err(e)
}
}
}
log!(5, "rustc jobs completed");
Ok(())
}
/// Execute a stage of compilation for a package.
///
/// The input freshness is from `dequeue()` and indicates the combined
/// freshness of all upstream dependencies. This function will schedule all
/// work in `jobs` to be executed.
fn run(&mut self, pkg: &'a Package, stage: Stage, fresh: Freshness,
jobs: Vec<(Job, Freshness)>, config: &Config) -> CargoResult<()> {
let njobs = jobs.len();
let amt = if njobs == 0 {1} else {njobs as u32};
let id = pkg.get_package_id().clone();
// While the jobs are all running, we maintain some metadata about how
// many are running, the current state of freshness (of all the combined
// jobs), and the stage to pass to finish() later on.
self.active += amt;
self.pending.insert((pkg.get_package_id(), stage), PendingBuild {
amt: amt,
fresh: fresh,
});
let mut total_fresh = fresh.combine(self.state[pkg.get_package_id()]);
let mut running = Vec::new();
for (job, job_freshness) in jobs.into_iter() {
let fresh = job_freshness.combine(fresh);
total_fresh = total_fresh.combine(fresh);
let my_tx = self.tx.clone();
let id = id.clone();
let (desc_tx, desc_rx) = channel();
self.pool.execute(move|| {
my_tx.send((id, stage, fresh, job.run(fresh, desc_tx))).unwrap();
});
// only the first message of each job is processed
match desc_rx.recv() {
Ok(msg) => running.push(msg),
Err(..) => {}
}
}
// If no work was scheduled, make sure that a message is actually send
// on this channel.
if njobs == 0 {
self.tx.send((id, stage, fresh, Ok(()))).unwrap();
}
// Print out some nice progress information
//
// This isn't super trivial becuase we don't want to print loads and
// loads of information to the console, but we also want to produce a
// faithful representation of what's happening. This is somewhat nuanced
// as a package can start compiling *very* early on because of custom
// build commands and such.
//
// In general, we try to print "Compiling" for the first nontrivial task
// run for a package, regardless of when that is. We then don't print
// out any more information for a package after we've printed it once.
let print =!self.ignored.contains(&pkg.get_package_id());
let print = print &&!self.printed.contains(&pkg.get_package_id());
if print && (stage == Stage::Libraries ||
(total_fresh == Dirty && running.len() > 0)) {
self.printed.insert(pkg.get_package_id());
match total_fresh {
Fresh => try!(config.shell().verbose(|c| {
c.status("Fresh", pkg)
})),
Dirty => try!(config.shell().status("Compiling", pkg))
}
}
for msg in running.iter() {
try!(config.shell().verbose(|c| c.status("Running", msg)));
}
Ok(())
}
}
impl<'a> Dependency for (&'a PackageId, Stage) {
type Context = (&'a Resolve, &'a PackageSet);
fn dependencies(&self, &(resolve, packages): &(&'a Resolve, &'a PackageSet))
-> Vec<(&'a PackageId, Stage)> {
// This implementation of `Dependency` is the driver for the structure
// of the dependency graph of packages to be built. The "key" here is
// a pair of the package being built and the stage that it's at.
//
// Each stage here lists dependencies on the previous stages except for
// the start state which depends on the ending state of all dependent
// packages (as determined by the resolve context).
let (id, stage) = *self;
let pkg = packages.iter().find(|p| p.get_package_id() == id).unwrap();
let deps = resolve.deps(id).into_iter().flat_map(|a| a)
.filter(|dep| *dep!= id)
.map(|dep| {
(dep, pkg.get_dependencies().iter().find(|d| {
d.get_name() == dep.get_name()
}).unwrap())
});
match stage {
Stage::Start => Vec::new(),
// Building the build command itself starts off pretty easily,we
// just need to depend on all of the library stages of our own build
// dependencies (making them available to us).
Stage::BuildCustomBuild => {
let mut base = vec![(id, Stage::Start)];
base.extend(deps.filter(|&(_, dep)| dep.is_build())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// When running a custom build command, we need to be sure that our
// own custom build command is actually built, and then we need to
// wait for all our dependencies to finish their custom build
// commands themselves (as they may provide input to us).
Stage::RunCustomBuild => {
let mut base = vec![(id, Stage::BuildCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::RunCustomBuild)));
base
}
// Building a library depends on our own custom build command plus
// all our transitive dependencies.
Stage::Libraries => {
let mut base = vec![(id, Stage::RunCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// Binaries only depend on libraries being available. Note that they
// do not depend on dev-dependencies.
Stage::Binaries => vec![(id, Stage::Libraries)],
// Tests depend on all dependencies (including dev-dependencies) in
// addition to the library stage for this package. Note, however,
// that library tests only need to depend the custom build command
// being run, not the libraries themselves.
Stage::BinaryTests | Stage::LibraryTests => {
let mut base = if stage == Stage::BinaryTests {
vec![(id, Stage::Libraries)]
} else {
vec![(id, Stage::RunCustomBuild)]
};
base.extend(deps.map(|(id, _)| (id, Stage::Libraries)));
base
}
}
}
} | for _ in self.rx.iter().take(self.active as usize) {} | random_line_split |
job_queue.rs | use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::sync::TaskPool;
use std::sync::mpsc::{channel, Sender, Receiver};
use term::color::YELLOW;
use core::{Package, PackageId, Resolve, PackageSet};
use util::{Config, DependencyQueue, Fresh, Dirty, Freshness};
use util::{CargoResult, Dependency, profile};
use super::job::Job;
/// A management structure of the entire dependency graph to compile.
///
/// This structure is backed by the `DependencyQueue` type and manages the
/// actual compilation step of each package. Packages enqueue units of work and
/// then later on the entire graph is processed and compiled.
pub struct JobQueue<'a, 'b> {
pool: TaskPool,
queue: DependencyQueue<(&'a PackageId, Stage),
(&'a Package, Vec<(Job, Freshness)>)>,
tx: Sender<Message>,
rx: Receiver<Message>,
resolve: &'a Resolve,
packages: &'a PackageSet,
active: u32,
pending: HashMap<(&'a PackageId, Stage), PendingBuild>,
state: HashMap<&'a PackageId, Freshness>,
ignored: HashSet<&'a PackageId>,
printed: HashSet<&'a PackageId>,
}
/// A helper structure for metadata about the state of a building package.
struct PendingBuild {
/// Number of jobs currently active
amt: u32,
/// Current freshness state of this package. Any dirty target within a
/// package will cause the entire package to become dirty.
fresh: Freshness,
}
/// Current stage of compilation for an individual package.
///
/// This is the second layer of keys on the dependency queue to track the state
/// of where a particular package is in the compilation pipeline. Each of these
/// stages has a network of dependencies among them, outlined by the
/// `Dependency` implementation found below.
///
/// Each build step for a package is registered with one of these stages, and
/// each stage has a vector of work to perform in parallel.
#[derive(Hash, PartialEq, Eq, Clone, PartialOrd, Ord, Show, Copy)]
pub enum Stage {
Start,
BuildCustomBuild,
RunCustomBuild,
Libraries,
Binaries,
LibraryTests,
BinaryTests,
}
type Message = (PackageId, Stage, Freshness, CargoResult<()>);
impl<'a, 'b> JobQueue<'a, 'b> {
pub fn new(resolve: &'a Resolve, packages: &'a PackageSet,
config: &Config) -> JobQueue<'a, 'b> {
let (tx, rx) = channel();
JobQueue {
pool: TaskPool::new(config.jobs() as usize),
queue: DependencyQueue::new(),
tx: tx,
rx: rx,
resolve: resolve,
packages: packages,
active: 0,
pending: HashMap::new(),
state: HashMap::new(),
ignored: HashSet::new(),
printed: HashSet::new(),
}
}
pub fn enqueue(&mut self, pkg: &'a Package, stage: Stage,
jobs: Vec<(Job, Freshness)>) {
// Record the freshness state of this package as dirty if any job is
// dirty or fresh otherwise
let fresh = jobs.iter().fold(Fresh, |f1, &(_, f2)| f1.combine(f2));
match self.state.entry(pkg.get_package_id()) {
Occupied(mut entry) => { *entry.get_mut() = entry.get().combine(fresh); }
Vacant(entry) => { entry.insert(fresh); }
};
// Add the package to the dependency graph
self.queue.enqueue(&(self.resolve, self.packages), Fresh,
(pkg.get_package_id(), stage),
(pkg, jobs));
}
pub fn ignore(&mut self, pkg: &'a Package) {
self.ignored.insert(pkg.get_package_id());
}
/// Execute all jobs necessary to build the dependency graph.
///
/// This function will spawn off `config.jobs()` workers to build all of the
/// necessary dependencies, in order. Freshness is propagated as far as
/// possible along each dependency chain.
pub fn execute(&mut self, config: &Config) -> CargoResult<()> {
let _p = profile::start("executing the job graph");
// Iteratively execute the dependency graph. Each turn of this loop will
// schedule as much work as possible and then wait for one job to finish,
// possibly scheduling more work afterwards.
while self.queue.len() > 0 {
loop {
match self.queue.dequeue() {
Some((fresh, (_, stage), (pkg, jobs))) => {
info!("start: {} {:?}", pkg, stage);
try!(self.run(pkg, stage, fresh, jobs, config));
}
None => break,
}
}
// Now that all possible work has been scheduled, wait for a piece
// of work to finish. If any package fails to build then we stop
// scheduling work as quickly as possibly.
let (id, stage, fresh, result) = self.rx.recv().unwrap();
info!(" end: {} {:?}", id, stage);
let id = *self.state.keys().find(|&k| *k == &id).unwrap();
self.active -= 1;
match result {
Ok(()) => {
let state = &mut self.pending[(id, stage)];
state.amt -= 1;
state.fresh = state.fresh.combine(fresh);
if state.amt == 0 {
self.queue.finish(&(id, stage), state.fresh);
}
}
Err(e) => {
if self.active > 0 {
try!(config.shell().say(
"Build failed, waiting for other \
jobs to finish...", YELLOW));
for _ in self.rx.iter().take(self.active as usize) {}
}
return Err(e)
}
}
}
log!(5, "rustc jobs completed");
Ok(())
}
/// Execute a stage of compilation for a package.
///
/// The input freshness is from `dequeue()` and indicates the combined
/// freshness of all upstream dependencies. This function will schedule all
/// work in `jobs` to be executed.
fn run(&mut self, pkg: &'a Package, stage: Stage, fresh: Freshness,
jobs: Vec<(Job, Freshness)>, config: &Config) -> CargoResult<()> {
let njobs = jobs.len();
let amt = if njobs == 0 {1} else {njobs as u32};
let id = pkg.get_package_id().clone();
// While the jobs are all running, we maintain some metadata about how
// many are running, the current state of freshness (of all the combined
// jobs), and the stage to pass to finish() later on.
self.active += amt;
self.pending.insert((pkg.get_package_id(), stage), PendingBuild {
amt: amt,
fresh: fresh,
});
let mut total_fresh = fresh.combine(self.state[pkg.get_package_id()]);
let mut running = Vec::new();
for (job, job_freshness) in jobs.into_iter() {
let fresh = job_freshness.combine(fresh);
total_fresh = total_fresh.combine(fresh);
let my_tx = self.tx.clone();
let id = id.clone();
let (desc_tx, desc_rx) = channel();
self.pool.execute(move|| {
my_tx.send((id, stage, fresh, job.run(fresh, desc_tx))).unwrap();
});
// only the first message of each job is processed
match desc_rx.recv() {
Ok(msg) => running.push(msg),
Err(..) => {}
}
}
// If no work was scheduled, make sure that a message is actually send
// on this channel.
if njobs == 0 {
self.tx.send((id, stage, fresh, Ok(()))).unwrap();
}
// Print out some nice progress information
//
// This isn't super trivial becuase we don't want to print loads and
// loads of information to the console, but we also want to produce a
// faithful representation of what's happening. This is somewhat nuanced
// as a package can start compiling *very* early on because of custom
// build commands and such.
//
// In general, we try to print "Compiling" for the first nontrivial task
// run for a package, regardless of when that is. We then don't print
// out any more information for a package after we've printed it once.
let print =!self.ignored.contains(&pkg.get_package_id());
let print = print &&!self.printed.contains(&pkg.get_package_id());
if print && (stage == Stage::Libraries ||
(total_fresh == Dirty && running.len() > 0)) {
self.printed.insert(pkg.get_package_id());
match total_fresh {
Fresh => try!(config.shell().verbose(|c| {
c.status("Fresh", pkg)
})),
Dirty => try!(config.shell().status("Compiling", pkg))
}
}
for msg in running.iter() {
try!(config.shell().verbose(|c| c.status("Running", msg)));
}
Ok(())
}
}
impl<'a> Dependency for (&'a PackageId, Stage) {
type Context = (&'a Resolve, &'a PackageSet);
fn | (&self, &(resolve, packages): &(&'a Resolve, &'a PackageSet))
-> Vec<(&'a PackageId, Stage)> {
// This implementation of `Dependency` is the driver for the structure
// of the dependency graph of packages to be built. The "key" here is
// a pair of the package being built and the stage that it's at.
//
// Each stage here lists dependencies on the previous stages except for
// the start state which depends on the ending state of all dependent
// packages (as determined by the resolve context).
let (id, stage) = *self;
let pkg = packages.iter().find(|p| p.get_package_id() == id).unwrap();
let deps = resolve.deps(id).into_iter().flat_map(|a| a)
.filter(|dep| *dep!= id)
.map(|dep| {
(dep, pkg.get_dependencies().iter().find(|d| {
d.get_name() == dep.get_name()
}).unwrap())
});
match stage {
Stage::Start => Vec::new(),
// Building the build command itself starts off pretty easily,we
// just need to depend on all of the library stages of our own build
// dependencies (making them available to us).
Stage::BuildCustomBuild => {
let mut base = vec![(id, Stage::Start)];
base.extend(deps.filter(|&(_, dep)| dep.is_build())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// When running a custom build command, we need to be sure that our
// own custom build command is actually built, and then we need to
// wait for all our dependencies to finish their custom build
// commands themselves (as they may provide input to us).
Stage::RunCustomBuild => {
let mut base = vec![(id, Stage::BuildCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::RunCustomBuild)));
base
}
// Building a library depends on our own custom build command plus
// all our transitive dependencies.
Stage::Libraries => {
let mut base = vec![(id, Stage::RunCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// Binaries only depend on libraries being available. Note that they
// do not depend on dev-dependencies.
Stage::Binaries => vec![(id, Stage::Libraries)],
// Tests depend on all dependencies (including dev-dependencies) in
// addition to the library stage for this package. Note, however,
// that library tests only need to depend the custom build command
// being run, not the libraries themselves.
Stage::BinaryTests | Stage::LibraryTests => {
let mut base = if stage == Stage::BinaryTests {
vec![(id, Stage::Libraries)]
} else {
vec![(id, Stage::RunCustomBuild)]
};
base.extend(deps.map(|(id, _)| (id, Stage::Libraries)));
base
}
}
}
}
| dependencies | identifier_name |
job_queue.rs | use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::sync::TaskPool;
use std::sync::mpsc::{channel, Sender, Receiver};
use term::color::YELLOW;
use core::{Package, PackageId, Resolve, PackageSet};
use util::{Config, DependencyQueue, Fresh, Dirty, Freshness};
use util::{CargoResult, Dependency, profile};
use super::job::Job;
/// A management structure of the entire dependency graph to compile.
///
/// This structure is backed by the `DependencyQueue` type and manages the
/// actual compilation step of each package. Packages enqueue units of work and
/// then later on the entire graph is processed and compiled.
pub struct JobQueue<'a, 'b> {
pool: TaskPool,
queue: DependencyQueue<(&'a PackageId, Stage),
(&'a Package, Vec<(Job, Freshness)>)>,
tx: Sender<Message>,
rx: Receiver<Message>,
resolve: &'a Resolve,
packages: &'a PackageSet,
active: u32,
pending: HashMap<(&'a PackageId, Stage), PendingBuild>,
state: HashMap<&'a PackageId, Freshness>,
ignored: HashSet<&'a PackageId>,
printed: HashSet<&'a PackageId>,
}
/// A helper structure for metadata about the state of a building package.
struct PendingBuild {
/// Number of jobs currently active
amt: u32,
/// Current freshness state of this package. Any dirty target within a
/// package will cause the entire package to become dirty.
fresh: Freshness,
}
/// Current stage of compilation for an individual package.
///
/// This is the second layer of keys on the dependency queue to track the state
/// of where a particular package is in the compilation pipeline. Each of these
/// stages has a network of dependencies among them, outlined by the
/// `Dependency` implementation found below.
///
/// Each build step for a package is registered with one of these stages, and
/// each stage has a vector of work to perform in parallel.
#[derive(Hash, PartialEq, Eq, Clone, PartialOrd, Ord, Show, Copy)]
pub enum Stage {
Start,
BuildCustomBuild,
RunCustomBuild,
Libraries,
Binaries,
LibraryTests,
BinaryTests,
}
type Message = (PackageId, Stage, Freshness, CargoResult<()>);
impl<'a, 'b> JobQueue<'a, 'b> {
pub fn new(resolve: &'a Resolve, packages: &'a PackageSet,
config: &Config) -> JobQueue<'a, 'b> {
let (tx, rx) = channel();
JobQueue {
pool: TaskPool::new(config.jobs() as usize),
queue: DependencyQueue::new(),
tx: tx,
rx: rx,
resolve: resolve,
packages: packages,
active: 0,
pending: HashMap::new(),
state: HashMap::new(),
ignored: HashSet::new(),
printed: HashSet::new(),
}
}
pub fn enqueue(&mut self, pkg: &'a Package, stage: Stage,
jobs: Vec<(Job, Freshness)>) {
// Record the freshness state of this package as dirty if any job is
// dirty or fresh otherwise
let fresh = jobs.iter().fold(Fresh, |f1, &(_, f2)| f1.combine(f2));
match self.state.entry(pkg.get_package_id()) {
Occupied(mut entry) => { *entry.get_mut() = entry.get().combine(fresh); }
Vacant(entry) => { entry.insert(fresh); }
};
// Add the package to the dependency graph
self.queue.enqueue(&(self.resolve, self.packages), Fresh,
(pkg.get_package_id(), stage),
(pkg, jobs));
}
pub fn ignore(&mut self, pkg: &'a Package) |
/// Execute all jobs necessary to build the dependency graph.
///
/// This function will spawn off `config.jobs()` workers to build all of the
/// necessary dependencies, in order. Freshness is propagated as far as
/// possible along each dependency chain.
pub fn execute(&mut self, config: &Config) -> CargoResult<()> {
let _p = profile::start("executing the job graph");
// Iteratively execute the dependency graph. Each turn of this loop will
// schedule as much work as possible and then wait for one job to finish,
// possibly scheduling more work afterwards.
while self.queue.len() > 0 {
loop {
match self.queue.dequeue() {
Some((fresh, (_, stage), (pkg, jobs))) => {
info!("start: {} {:?}", pkg, stage);
try!(self.run(pkg, stage, fresh, jobs, config));
}
None => break,
}
}
// Now that all possible work has been scheduled, wait for a piece
// of work to finish. If any package fails to build then we stop
// scheduling work as quickly as possibly.
let (id, stage, fresh, result) = self.rx.recv().unwrap();
info!(" end: {} {:?}", id, stage);
let id = *self.state.keys().find(|&k| *k == &id).unwrap();
self.active -= 1;
match result {
Ok(()) => {
let state = &mut self.pending[(id, stage)];
state.amt -= 1;
state.fresh = state.fresh.combine(fresh);
if state.amt == 0 {
self.queue.finish(&(id, stage), state.fresh);
}
}
Err(e) => {
if self.active > 0 {
try!(config.shell().say(
"Build failed, waiting for other \
jobs to finish...", YELLOW));
for _ in self.rx.iter().take(self.active as usize) {}
}
return Err(e)
}
}
}
log!(5, "rustc jobs completed");
Ok(())
}
/// Execute a stage of compilation for a package.
///
/// The input freshness is from `dequeue()` and indicates the combined
/// freshness of all upstream dependencies. This function will schedule all
/// work in `jobs` to be executed.
fn run(&mut self, pkg: &'a Package, stage: Stage, fresh: Freshness,
jobs: Vec<(Job, Freshness)>, config: &Config) -> CargoResult<()> {
let njobs = jobs.len();
let amt = if njobs == 0 {1} else {njobs as u32};
let id = pkg.get_package_id().clone();
// While the jobs are all running, we maintain some metadata about how
// many are running, the current state of freshness (of all the combined
// jobs), and the stage to pass to finish() later on.
self.active += amt;
self.pending.insert((pkg.get_package_id(), stage), PendingBuild {
amt: amt,
fresh: fresh,
});
let mut total_fresh = fresh.combine(self.state[pkg.get_package_id()]);
let mut running = Vec::new();
for (job, job_freshness) in jobs.into_iter() {
let fresh = job_freshness.combine(fresh);
total_fresh = total_fresh.combine(fresh);
let my_tx = self.tx.clone();
let id = id.clone();
let (desc_tx, desc_rx) = channel();
self.pool.execute(move|| {
my_tx.send((id, stage, fresh, job.run(fresh, desc_tx))).unwrap();
});
// only the first message of each job is processed
match desc_rx.recv() {
Ok(msg) => running.push(msg),
Err(..) => {}
}
}
// If no work was scheduled, make sure that a message is actually send
// on this channel.
if njobs == 0 {
self.tx.send((id, stage, fresh, Ok(()))).unwrap();
}
// Print out some nice progress information
//
// This isn't super trivial becuase we don't want to print loads and
// loads of information to the console, but we also want to produce a
// faithful representation of what's happening. This is somewhat nuanced
// as a package can start compiling *very* early on because of custom
// build commands and such.
//
// In general, we try to print "Compiling" for the first nontrivial task
// run for a package, regardless of when that is. We then don't print
// out any more information for a package after we've printed it once.
let print =!self.ignored.contains(&pkg.get_package_id());
let print = print &&!self.printed.contains(&pkg.get_package_id());
if print && (stage == Stage::Libraries ||
(total_fresh == Dirty && running.len() > 0)) {
self.printed.insert(pkg.get_package_id());
match total_fresh {
Fresh => try!(config.shell().verbose(|c| {
c.status("Fresh", pkg)
})),
Dirty => try!(config.shell().status("Compiling", pkg))
}
}
for msg in running.iter() {
try!(config.shell().verbose(|c| c.status("Running", msg)));
}
Ok(())
}
}
impl<'a> Dependency for (&'a PackageId, Stage) {
type Context = (&'a Resolve, &'a PackageSet);
fn dependencies(&self, &(resolve, packages): &(&'a Resolve, &'a PackageSet))
-> Vec<(&'a PackageId, Stage)> {
// This implementation of `Dependency` is the driver for the structure
// of the dependency graph of packages to be built. The "key" here is
// a pair of the package being built and the stage that it's at.
//
// Each stage here lists dependencies on the previous stages except for
// the start state which depends on the ending state of all dependent
// packages (as determined by the resolve context).
let (id, stage) = *self;
let pkg = packages.iter().find(|p| p.get_package_id() == id).unwrap();
let deps = resolve.deps(id).into_iter().flat_map(|a| a)
.filter(|dep| *dep!= id)
.map(|dep| {
(dep, pkg.get_dependencies().iter().find(|d| {
d.get_name() == dep.get_name()
}).unwrap())
});
match stage {
Stage::Start => Vec::new(),
// Building the build command itself starts off pretty easily,we
// just need to depend on all of the library stages of our own build
// dependencies (making them available to us).
Stage::BuildCustomBuild => {
let mut base = vec![(id, Stage::Start)];
base.extend(deps.filter(|&(_, dep)| dep.is_build())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// When running a custom build command, we need to be sure that our
// own custom build command is actually built, and then we need to
// wait for all our dependencies to finish their custom build
// commands themselves (as they may provide input to us).
Stage::RunCustomBuild => {
let mut base = vec![(id, Stage::BuildCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::RunCustomBuild)));
base
}
// Building a library depends on our own custom build command plus
// all our transitive dependencies.
Stage::Libraries => {
let mut base = vec![(id, Stage::RunCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// Binaries only depend on libraries being available. Note that they
// do not depend on dev-dependencies.
Stage::Binaries => vec![(id, Stage::Libraries)],
// Tests depend on all dependencies (including dev-dependencies) in
// addition to the library stage for this package. Note, however,
// that library tests only need to depend the custom build command
// being run, not the libraries themselves.
Stage::BinaryTests | Stage::LibraryTests => {
let mut base = if stage == Stage::BinaryTests {
vec![(id, Stage::Libraries)]
} else {
vec![(id, Stage::RunCustomBuild)]
};
base.extend(deps.map(|(id, _)| (id, Stage::Libraries)));
base
}
}
}
}
| {
self.ignored.insert(pkg.get_package_id());
} | identifier_body |
job_queue.rs | use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::sync::TaskPool;
use std::sync::mpsc::{channel, Sender, Receiver};
use term::color::YELLOW;
use core::{Package, PackageId, Resolve, PackageSet};
use util::{Config, DependencyQueue, Fresh, Dirty, Freshness};
use util::{CargoResult, Dependency, profile};
use super::job::Job;
/// A management structure of the entire dependency graph to compile.
///
/// This structure is backed by the `DependencyQueue` type and manages the
/// actual compilation step of each package. Packages enqueue units of work and
/// then later on the entire graph is processed and compiled.
pub struct JobQueue<'a, 'b> {
pool: TaskPool,
queue: DependencyQueue<(&'a PackageId, Stage),
(&'a Package, Vec<(Job, Freshness)>)>,
tx: Sender<Message>,
rx: Receiver<Message>,
resolve: &'a Resolve,
packages: &'a PackageSet,
active: u32,
pending: HashMap<(&'a PackageId, Stage), PendingBuild>,
state: HashMap<&'a PackageId, Freshness>,
ignored: HashSet<&'a PackageId>,
printed: HashSet<&'a PackageId>,
}
/// A helper structure for metadata about the state of a building package.
struct PendingBuild {
/// Number of jobs currently active
amt: u32,
/// Current freshness state of this package. Any dirty target within a
/// package will cause the entire package to become dirty.
fresh: Freshness,
}
/// Current stage of compilation for an individual package.
///
/// This is the second layer of keys on the dependency queue to track the state
/// of where a particular package is in the compilation pipeline. Each of these
/// stages has a network of dependencies among them, outlined by the
/// `Dependency` implementation found below.
///
/// Each build step for a package is registered with one of these stages, and
/// each stage has a vector of work to perform in parallel.
#[derive(Hash, PartialEq, Eq, Clone, PartialOrd, Ord, Show, Copy)]
pub enum Stage {
Start,
BuildCustomBuild,
RunCustomBuild,
Libraries,
Binaries,
LibraryTests,
BinaryTests,
}
type Message = (PackageId, Stage, Freshness, CargoResult<()>);
impl<'a, 'b> JobQueue<'a, 'b> {
pub fn new(resolve: &'a Resolve, packages: &'a PackageSet,
config: &Config) -> JobQueue<'a, 'b> {
let (tx, rx) = channel();
JobQueue {
pool: TaskPool::new(config.jobs() as usize),
queue: DependencyQueue::new(),
tx: tx,
rx: rx,
resolve: resolve,
packages: packages,
active: 0,
pending: HashMap::new(),
state: HashMap::new(),
ignored: HashSet::new(),
printed: HashSet::new(),
}
}
pub fn enqueue(&mut self, pkg: &'a Package, stage: Stage,
jobs: Vec<(Job, Freshness)>) {
// Record the freshness state of this package as dirty if any job is
// dirty or fresh otherwise
let fresh = jobs.iter().fold(Fresh, |f1, &(_, f2)| f1.combine(f2));
match self.state.entry(pkg.get_package_id()) {
Occupied(mut entry) => { *entry.get_mut() = entry.get().combine(fresh); }
Vacant(entry) => |
};
// Add the package to the dependency graph
self.queue.enqueue(&(self.resolve, self.packages), Fresh,
(pkg.get_package_id(), stage),
(pkg, jobs));
}
pub fn ignore(&mut self, pkg: &'a Package) {
self.ignored.insert(pkg.get_package_id());
}
/// Execute all jobs necessary to build the dependency graph.
///
/// This function will spawn off `config.jobs()` workers to build all of the
/// necessary dependencies, in order. Freshness is propagated as far as
/// possible along each dependency chain.
pub fn execute(&mut self, config: &Config) -> CargoResult<()> {
let _p = profile::start("executing the job graph");
// Iteratively execute the dependency graph. Each turn of this loop will
// schedule as much work as possible and then wait for one job to finish,
// possibly scheduling more work afterwards.
while self.queue.len() > 0 {
loop {
match self.queue.dequeue() {
Some((fresh, (_, stage), (pkg, jobs))) => {
info!("start: {} {:?}", pkg, stage);
try!(self.run(pkg, stage, fresh, jobs, config));
}
None => break,
}
}
// Now that all possible work has been scheduled, wait for a piece
// of work to finish. If any package fails to build then we stop
// scheduling work as quickly as possibly.
let (id, stage, fresh, result) = self.rx.recv().unwrap();
info!(" end: {} {:?}", id, stage);
let id = *self.state.keys().find(|&k| *k == &id).unwrap();
self.active -= 1;
match result {
Ok(()) => {
let state = &mut self.pending[(id, stage)];
state.amt -= 1;
state.fresh = state.fresh.combine(fresh);
if state.amt == 0 {
self.queue.finish(&(id, stage), state.fresh);
}
}
Err(e) => {
if self.active > 0 {
try!(config.shell().say(
"Build failed, waiting for other \
jobs to finish...", YELLOW));
for _ in self.rx.iter().take(self.active as usize) {}
}
return Err(e)
}
}
}
log!(5, "rustc jobs completed");
Ok(())
}
/// Execute a stage of compilation for a package.
///
/// The input freshness is from `dequeue()` and indicates the combined
/// freshness of all upstream dependencies. This function will schedule all
/// work in `jobs` to be executed.
fn run(&mut self, pkg: &'a Package, stage: Stage, fresh: Freshness,
jobs: Vec<(Job, Freshness)>, config: &Config) -> CargoResult<()> {
let njobs = jobs.len();
let amt = if njobs == 0 {1} else {njobs as u32};
let id = pkg.get_package_id().clone();
// While the jobs are all running, we maintain some metadata about how
// many are running, the current state of freshness (of all the combined
// jobs), and the stage to pass to finish() later on.
self.active += amt;
self.pending.insert((pkg.get_package_id(), stage), PendingBuild {
amt: amt,
fresh: fresh,
});
let mut total_fresh = fresh.combine(self.state[pkg.get_package_id()]);
let mut running = Vec::new();
for (job, job_freshness) in jobs.into_iter() {
let fresh = job_freshness.combine(fresh);
total_fresh = total_fresh.combine(fresh);
let my_tx = self.tx.clone();
let id = id.clone();
let (desc_tx, desc_rx) = channel();
self.pool.execute(move|| {
my_tx.send((id, stage, fresh, job.run(fresh, desc_tx))).unwrap();
});
// only the first message of each job is processed
match desc_rx.recv() {
Ok(msg) => running.push(msg),
Err(..) => {}
}
}
// If no work was scheduled, make sure that a message is actually send
// on this channel.
if njobs == 0 {
self.tx.send((id, stage, fresh, Ok(()))).unwrap();
}
// Print out some nice progress information
//
// This isn't super trivial becuase we don't want to print loads and
// loads of information to the console, but we also want to produce a
// faithful representation of what's happening. This is somewhat nuanced
// as a package can start compiling *very* early on because of custom
// build commands and such.
//
// In general, we try to print "Compiling" for the first nontrivial task
// run for a package, regardless of when that is. We then don't print
// out any more information for a package after we've printed it once.
let print =!self.ignored.contains(&pkg.get_package_id());
let print = print &&!self.printed.contains(&pkg.get_package_id());
if print && (stage == Stage::Libraries ||
(total_fresh == Dirty && running.len() > 0)) {
self.printed.insert(pkg.get_package_id());
match total_fresh {
Fresh => try!(config.shell().verbose(|c| {
c.status("Fresh", pkg)
})),
Dirty => try!(config.shell().status("Compiling", pkg))
}
}
for msg in running.iter() {
try!(config.shell().verbose(|c| c.status("Running", msg)));
}
Ok(())
}
}
impl<'a> Dependency for (&'a PackageId, Stage) {
type Context = (&'a Resolve, &'a PackageSet);
fn dependencies(&self, &(resolve, packages): &(&'a Resolve, &'a PackageSet))
-> Vec<(&'a PackageId, Stage)> {
// This implementation of `Dependency` is the driver for the structure
// of the dependency graph of packages to be built. The "key" here is
// a pair of the package being built and the stage that it's at.
//
// Each stage here lists dependencies on the previous stages except for
// the start state which depends on the ending state of all dependent
// packages (as determined by the resolve context).
let (id, stage) = *self;
let pkg = packages.iter().find(|p| p.get_package_id() == id).unwrap();
let deps = resolve.deps(id).into_iter().flat_map(|a| a)
.filter(|dep| *dep!= id)
.map(|dep| {
(dep, pkg.get_dependencies().iter().find(|d| {
d.get_name() == dep.get_name()
}).unwrap())
});
match stage {
Stage::Start => Vec::new(),
// Building the build command itself starts off pretty easily,we
// just need to depend on all of the library stages of our own build
// dependencies (making them available to us).
Stage::BuildCustomBuild => {
let mut base = vec![(id, Stage::Start)];
base.extend(deps.filter(|&(_, dep)| dep.is_build())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// When running a custom build command, we need to be sure that our
// own custom build command is actually built, and then we need to
// wait for all our dependencies to finish their custom build
// commands themselves (as they may provide input to us).
Stage::RunCustomBuild => {
let mut base = vec![(id, Stage::BuildCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::RunCustomBuild)));
base
}
// Building a library depends on our own custom build command plus
// all our transitive dependencies.
Stage::Libraries => {
let mut base = vec![(id, Stage::RunCustomBuild)];
base.extend(deps.filter(|&(_, dep)| dep.is_transitive())
.map(|(id, _)| (id, Stage::Libraries)));
base
}
// Binaries only depend on libraries being available. Note that they
// do not depend on dev-dependencies.
Stage::Binaries => vec![(id, Stage::Libraries)],
// Tests depend on all dependencies (including dev-dependencies) in
// addition to the library stage for this package. Note, however,
// that library tests only need to depend the custom build command
// being run, not the libraries themselves.
Stage::BinaryTests | Stage::LibraryTests => {
let mut base = if stage == Stage::BinaryTests {
vec![(id, Stage::Libraries)]
} else {
vec![(id, Stage::RunCustomBuild)]
};
base.extend(deps.map(|(id, _)| (id, Stage::Libraries)));
base
}
}
}
}
| { entry.insert(fresh); } | conditional_block |
mem.rs | use std::num::SignedInt;
use syntax::{Dir, Left, Right};
// size of allocated memory in bytes
const MEM_SIZE: usize = 65_536; // 64kB!
pub struct Mem {
cells: Box<[u8]>, // address space
ptr: usize // pointer in address space
}
impl Mem {
/// Create a new `Mem` stuct.
#[inline]
pub fn new() -> Mem |
/// Return the value of cell at the current pointer.
#[inline]
pub fn get(&self) -> u8 {
self.cells[self.ptr]
}
/// Set the value at the current pointer.
#[inline]
pub fn set(&mut self, value: u8) {
self.cells[self.ptr] = value;
}
/// Adds `value` to the current cell.
#[inline]
pub fn add(&mut self, value: u8) {
self.cells[self.ptr] += value;
}
/// Subtracts `value` from the current cell.
#[inline]
pub fn subtract(&mut self, value: u8) {
self.cells[self.ptr] -= value;
}
/// Shifts the current pointer to the left or right by a number of steps.
#[inline]
pub fn shift(&mut self, dir: Dir, steps: usize) {
match dir {
Left => self.ptr -= steps,
Right => self.ptr += steps,
}
}
// optimizations
/// Clears the current cell.
#[inline]
pub fn clear(&mut self) {
self.cells[self.ptr] = 0;
}
/// Scans left or right for a zero cell. This fuction will panic! if there
/// is no zero cell before it scans past the beginning of the address space.
#[inline]
pub fn scan(&mut self, dir: Dir) {
while self.cells[self.ptr]!= 0 {
self.shift(dir, 1);
}
}
/// Copys the value of the current cell into the cell left or right a
/// number of steps.
#[inline]
pub fn copy(&mut self, dir: Dir, steps: usize) {
let index = match dir {
Left => self.ptr - steps,
Right => self.ptr + steps,
};
self.cells[index] += self.cells[self.ptr];
}
/// Multiplys the value of the current cell by a factor and inserts the
/// product into the cell left or right a number of steps.
pub fn multiply(&mut self, dir: Dir, steps: usize, factor: i8) {
let index = match dir {
Left => self.ptr - steps,
Right => self.ptr + steps,
};
// safely cast factor to u8
let u8_factor = SignedInt::abs(factor) as u8;
// when factor is 1 it acts like a copy
if factor == 1 {
self.cells[index] += self.cells[self.ptr];
}
// when factor is -1 it acts like the inverse of copy
else if factor == -1 {
self.cells[index] -= self.cells[self.ptr];
}
// when factor is >= 2 it adds the product of the current cell and the
// absolute value of factor to the cell at index
else if factor >= 2 {
self.cells[index] += self.cells[self.ptr] * u8_factor;
}
// when factor is <= -2 it subtracts the product of the current cell and the
// absolute value of factor to the cell at index
else if factor <= -2 {
self.cells[index] -= self.cells[self.ptr] * u8_factor;
}
// when factor is 0 it is ignored, as it would do nothing
else {}
}
}
| {
Mem {
cells: box [0u8; MEM_SIZE],
ptr: 0
}
} | identifier_body |
mem.rs | use std::num::SignedInt;
use syntax::{Dir, Left, Right};
// size of allocated memory in bytes
const MEM_SIZE: usize = 65_536; // 64kB!
pub struct Mem {
cells: Box<[u8]>, // address space
ptr: usize // pointer in address space
}
impl Mem {
/// Create a new `Mem` stuct.
#[inline]
pub fn new() -> Mem {
Mem {
cells: box [0u8; MEM_SIZE],
ptr: 0
}
}
/// Return the value of cell at the current pointer.
#[inline]
pub fn get(&self) -> u8 {
self.cells[self.ptr]
}
/// Set the value at the current pointer.
#[inline]
pub fn set(&mut self, value: u8) {
self.cells[self.ptr] = value;
}
/// Adds `value` to the current cell.
#[inline]
pub fn add(&mut self, value: u8) {
self.cells[self.ptr] += value;
}
/// Subtracts `value` from the current cell.
#[inline]
pub fn subtract(&mut self, value: u8) {
self.cells[self.ptr] -= value;
}
/// Shifts the current pointer to the left or right by a number of steps.
#[inline]
pub fn shift(&mut self, dir: Dir, steps: usize) {
match dir {
Left => self.ptr -= steps,
Right => self.ptr += steps,
}
}
// optimizations
/// Clears the current cell.
#[inline]
pub fn clear(&mut self) {
self.cells[self.ptr] = 0;
}
/// Scans left or right for a zero cell. This fuction will panic! if there
/// is no zero cell before it scans past the beginning of the address space.
#[inline]
pub fn scan(&mut self, dir: Dir) {
while self.cells[self.ptr]!= 0 {
self.shift(dir, 1);
}
}
/// Copys the value of the current cell into the cell left or right a
/// number of steps.
#[inline] | };
self.cells[index] += self.cells[self.ptr];
}
/// Multiplys the value of the current cell by a factor and inserts the
/// product into the cell left or right a number of steps.
pub fn multiply(&mut self, dir: Dir, steps: usize, factor: i8) {
let index = match dir {
Left => self.ptr - steps,
Right => self.ptr + steps,
};
// safely cast factor to u8
let u8_factor = SignedInt::abs(factor) as u8;
// when factor is 1 it acts like a copy
if factor == 1 {
self.cells[index] += self.cells[self.ptr];
}
// when factor is -1 it acts like the inverse of copy
else if factor == -1 {
self.cells[index] -= self.cells[self.ptr];
}
// when factor is >= 2 it adds the product of the current cell and the
// absolute value of factor to the cell at index
else if factor >= 2 {
self.cells[index] += self.cells[self.ptr] * u8_factor;
}
// when factor is <= -2 it subtracts the product of the current cell and the
// absolute value of factor to the cell at index
else if factor <= -2 {
self.cells[index] -= self.cells[self.ptr] * u8_factor;
}
// when factor is 0 it is ignored, as it would do nothing
else {}
}
} | pub fn copy(&mut self, dir: Dir, steps: usize) {
let index = match dir {
Left => self.ptr - steps,
Right => self.ptr + steps, | random_line_split |
mem.rs | use std::num::SignedInt;
use syntax::{Dir, Left, Right};
// size of allocated memory in bytes
const MEM_SIZE: usize = 65_536; // 64kB!
pub struct Mem {
cells: Box<[u8]>, // address space
ptr: usize // pointer in address space
}
impl Mem {
/// Create a new `Mem` stuct.
#[inline]
pub fn new() -> Mem {
Mem {
cells: box [0u8; MEM_SIZE],
ptr: 0
}
}
/// Return the value of cell at the current pointer.
#[inline]
pub fn get(&self) -> u8 {
self.cells[self.ptr]
}
/// Set the value at the current pointer.
#[inline]
pub fn set(&mut self, value: u8) {
self.cells[self.ptr] = value;
}
/// Adds `value` to the current cell.
#[inline]
pub fn add(&mut self, value: u8) {
self.cells[self.ptr] += value;
}
/// Subtracts `value` from the current cell.
#[inline]
pub fn subtract(&mut self, value: u8) {
self.cells[self.ptr] -= value;
}
/// Shifts the current pointer to the left or right by a number of steps.
#[inline]
pub fn shift(&mut self, dir: Dir, steps: usize) {
match dir {
Left => self.ptr -= steps,
Right => self.ptr += steps,
}
}
// optimizations
/// Clears the current cell.
#[inline]
pub fn clear(&mut self) {
self.cells[self.ptr] = 0;
}
/// Scans left or right for a zero cell. This fuction will panic! if there
/// is no zero cell before it scans past the beginning of the address space.
#[inline]
pub fn | (&mut self, dir: Dir) {
while self.cells[self.ptr]!= 0 {
self.shift(dir, 1);
}
}
/// Copys the value of the current cell into the cell left or right a
/// number of steps.
#[inline]
pub fn copy(&mut self, dir: Dir, steps: usize) {
let index = match dir {
Left => self.ptr - steps,
Right => self.ptr + steps,
};
self.cells[index] += self.cells[self.ptr];
}
/// Multiplys the value of the current cell by a factor and inserts the
/// product into the cell left or right a number of steps.
pub fn multiply(&mut self, dir: Dir, steps: usize, factor: i8) {
let index = match dir {
Left => self.ptr - steps,
Right => self.ptr + steps,
};
// safely cast factor to u8
let u8_factor = SignedInt::abs(factor) as u8;
// when factor is 1 it acts like a copy
if factor == 1 {
self.cells[index] += self.cells[self.ptr];
}
// when factor is -1 it acts like the inverse of copy
else if factor == -1 {
self.cells[index] -= self.cells[self.ptr];
}
// when factor is >= 2 it adds the product of the current cell and the
// absolute value of factor to the cell at index
else if factor >= 2 {
self.cells[index] += self.cells[self.ptr] * u8_factor;
}
// when factor is <= -2 it subtracts the product of the current cell and the
// absolute value of factor to the cell at index
else if factor <= -2 {
self.cells[index] -= self.cells[self.ptr] * u8_factor;
}
// when factor is 0 it is ignored, as it would do nothing
else {}
}
}
| scan | identifier_name |
indexedlogauxstore.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Cursor;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use anyhow::bail;
use anyhow::Result;
use byteorder::ReadBytesExt;
use byteorder::WriteBytesExt;
use configparser::config::ConfigSet;
use configparser::convert::ByteCount;
use edenapi_types::ContentId;
use edenapi_types::FileAuxData;
use edenapi_types::Sha1;
use edenapi_types::Sha256;
use indexedlog::log::IndexOutput;
use minibytes::Bytes;
use parking_lot::RwLock;
use types::hgid::ReadHgIdExt;
use types::HgId;
use vlqencoding::VLQDecode;
use vlqencoding::VLQEncode;
use crate::indexedlogutil::Store;
use crate::indexedlogutil::StoreOpenOptions;
use crate::indexedlogutil::StoreType;
/// See edenapi_types::FileAuxData and mononoke_types::ContentMetadata
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Entry {
pub(crate) total_size: u64,
pub(crate) content_id: ContentId,
pub(crate) content_sha1: Sha1,
pub(crate) content_sha256: Sha256,
}
impl From<FileAuxData> for Entry {
fn from(v: FileAuxData) -> Self {
Entry {
total_size: v.total_size,
content_id: v.content_id,
content_sha1: v.sha1,
content_sha256: v.sha256,
}
}
}
impl Entry {
pub fn total_size(&self) -> u64 {
self.total_size
}
pub fn content_id(&self) -> ContentId {
self.content_id
}
pub fn content_sha1(&self) -> Sha1 {
self.content_sha1
}
pub fn content_sha256(&self) -> Sha256 {
self.content_sha256
}
/// Serialize the Entry to Bytes.
///
/// The serialization format is as follows:
/// - HgId <20 bytes>
/// - Version <1 byte> (for compatibility)
/// - content_id <32 bytes>
/// - content sha1 <20 bytes>
/// - content sha256 <32 bytes>
/// - total_size <u64 VLQ, 1-9 bytes>
fn serialize(&self, hgid: HgId) -> Result<Bytes> {
let mut buf = Vec::new();
buf.write_all(hgid.as_ref())?;
buf.write_u8(0)?; // write version
buf.write_all(self.content_id.as_ref())?;
buf.write_all(self.content_sha1.as_ref())?;
buf.write_all(self.content_sha256.as_ref())?;
buf.write_vlq(self.total_size)?;
Ok(buf.into())
}
fn deserialize(bytes: Bytes) -> Result<(HgId, Self)> {
let data: &[u8] = bytes.as_ref();
let mut cur = Cursor::new(data);
let hgid = cur.read_hgid()?;
let version = cur.read_u8()?;
if version!= 0 {
bail!("unsupported auxstore entry version {}", version);
}
let mut content_id = [0u8; 32];
cur.read_exact(&mut content_id)?;
let mut content_sha1 = [0u8; 20];
cur.read_exact(&mut content_sha1)?;
let mut content_sha256 = [0u8; 32];
cur.read_exact(&mut content_sha256)?;
let total_size: u64 = cur.read_vlq()?;
Ok((
hgid,
Entry {
content_id: content_id.into(),
content_sha1: content_sha1.into(),
content_sha256: content_sha256.into(),
total_size,
},
))
}
}
pub struct AuxStore(RwLock<Store>);
impl AuxStore {
pub fn new(path: impl AsRef<Path>, config: &ConfigSet, store_type: StoreType) -> Result<Self> {
// TODO(meyer): Eliminate "local" AuxStore - always treat it as shared / cache?
let open_options = AuxStore::open_options(config)?;
let log = match store_type {
StoreType::Local => open_options.local(&path),
StoreType::Shared => open_options.shared(&path),
}?;
Ok(AuxStore(RwLock::new(log)))
}
fn open_options(config: &ConfigSet) -> Result<StoreOpenOptions> {
// TODO(meyer): Decide exactly how we want to configure this store. This is all copied from indexedlogdatastore
// Default configuration: 4 x 2.5GB.
let mut open_options = StoreOpenOptions::new()
.max_log_count(4)
.max_bytes_per_log(2500 * 1000 * 1000)
.auto_sync_threshold(10 * 1024 * 1024)
.create(true)
.index("node", |_| {
vec![IndexOutput::Reference(0..HgId::len() as u64)]
});
if let Some(max_log_count) = config.get_opt::<u8>("indexedlog", "data.max-log-count")? {
open_options = open_options.max_log_count(max_log_count);
}
if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("indexedlog", "data.max-bytes-per-log")?
{
open_options = open_options.max_bytes_per_log(max_bytes_per_log.value());
} else if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("remotefilelog", "cachelimit")?
|
Ok(open_options)
}
pub fn get(&self, hgid: HgId) -> Result<Option<Entry>> {
let log = self.0.read();
let mut entries = log.lookup(0, &hgid)?;
let slice = match entries.next() {
None => return Ok(None),
Some(slice) => slice?,
};
let bytes = log.slice_to_bytes(slice);
drop(log);
Entry::deserialize(bytes).map(|(_hgid, entry)| Some(entry))
}
pub fn put(&self, hgid: HgId, entry: &Entry) -> Result<()> {
let serialized = entry.serialize(hgid)?;
self.0.write().append(&serialized)
}
pub fn flush(&self) -> Result<()> {
self.0.write().flush()
}
#[cfg(test)]
pub(crate) fn hgids(&self) -> Result<Vec<HgId>> {
let log = self.0.read();
log.iter()
.map(|slice| {
let bytes = log.slice_to_bytes(slice?);
Entry::deserialize(bytes).map(|(hgid, _entry)| hgid)
})
.collect()
}
}
#[cfg(test)]
mod tests {
use std::fs::remove_file;
use std::str::FromStr;
use std::sync::Arc;
use tempfile::TempDir;
use types::testutil::*;
use super::*;
use crate::scmstore::FileAttributes;
use crate::scmstore::FileStore;
use crate::testutil::*;
use crate::ExtStoredPolicy;
use crate::HgIdMutableDeltaStore;
use crate::IndexedLogHgIdDataStore;
fn single_byte_sha1(fst: u8) -> Sha1 {
let mut x: [u8; Sha1::len()] = Default::default();
x[0] = fst;
Sha1::from(x)
}
#[test]
fn test_empty() -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
store.flush()?;
Ok(())
}
#[test]
fn test_add_get() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let found = store.get(k.hgid)?;
assert_eq!(Some(entry), found);
Ok(())
}
#[test]
fn test_lookup_failure() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let k2 = key("b", "2");
let found = store.get(k2.hgid)?;
assert_eq!(None, found);
Ok(())
}
#[test]
fn test_corrupted() -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "2");
let mut entry = Entry::default();
entry.total_size = 2;
entry.content_sha1 = single_byte_sha1(2);
store.put(k.hgid, &entry)?;
store.flush()?;
drop(store);
// Corrupt the log by removing the "log" file.
let mut rotate_log_path = tempdir.path().to_path_buf();
rotate_log_path.push("0");
rotate_log_path.push("log");
remove_file(rotate_log_path)?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "3");
let mut entry = Entry::default();
entry.total_size = 3;
entry.content_sha1 = single_byte_sha1(3);
store.put(k.hgid, &entry)?;
store.flush()?;
// There should be only one key in the store.
assert_eq!(store.hgids().into_iter().count(), 1);
Ok(())
}
#[test]
fn test_scmstore_read() -> Result<()> {
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
aux.put(k.hgid, &entry)?;
aux.flush()?;
// Set up local-only FileStore
let mut store = FileStore::empty();
store.aux_local = Some(aux.clone());
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(entry, fetched.aux_data().expect("no aux data found").into());
Ok(())
}
#[test]
fn test_scmstore_compute_read() -> Result<()> {
let k = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let d = delta("1234", None, k.clone());
let meta = Default::default();
// Setup local indexedlog
let tmp = TempDir::new()?;
let content = Arc::new(IndexedLogHgIdDataStore::new(
&tmp,
ExtStoredPolicy::Ignore,
&ConfigSet::new(),
StoreType::Shared,
)?);
content.add(&d, &meta).unwrap();
content.flush().unwrap();
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
// Set up local-only FileStore
let mut store = FileStore::empty();
store.cache_to_local_cache = true;
store.indexedlog_local = Some(content.clone());
store.aux_local = Some(aux.clone());
let mut expected = Entry::default();
expected.total_size = 4;
expected.content_id = ContentId::from_str(
"aa6ab85da77ca480b7624172fe44aa9906b6c3f00f06ff23c3e5f60bfd0c414e",
)?;
expected.content_sha1 = Sha1::from_str("7110eda4d09e062aa5e4a390b0a572ac0d2c0220")?;
expected.content_sha256 =
Sha256::from_str("03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4")?;
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(
expected,
fetched.aux_data().expect("no aux data found").into()
);
// Verify we can read it directly too
let found = aux.get(k.hgid)?;
assert_eq!(Some(expected), found);
Ok(())
}
}
| {
let log_count: u64 = open_options.max_log_count.unwrap_or(1).max(1).into();
open_options =
open_options.max_bytes_per_log((max_bytes_per_log.value() / log_count).max(1));
} | conditional_block |
indexedlogauxstore.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Cursor;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use anyhow::bail;
use anyhow::Result;
use byteorder::ReadBytesExt;
use byteorder::WriteBytesExt;
use configparser::config::ConfigSet;
use configparser::convert::ByteCount;
use edenapi_types::ContentId;
use edenapi_types::FileAuxData;
use edenapi_types::Sha1;
use edenapi_types::Sha256;
use indexedlog::log::IndexOutput;
use minibytes::Bytes;
use parking_lot::RwLock;
use types::hgid::ReadHgIdExt;
use types::HgId;
use vlqencoding::VLQDecode;
use vlqencoding::VLQEncode;
use crate::indexedlogutil::Store;
use crate::indexedlogutil::StoreOpenOptions;
use crate::indexedlogutil::StoreType;
/// See edenapi_types::FileAuxData and mononoke_types::ContentMetadata
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Entry {
pub(crate) total_size: u64,
pub(crate) content_id: ContentId,
pub(crate) content_sha1: Sha1,
pub(crate) content_sha256: Sha256,
}
impl From<FileAuxData> for Entry {
fn from(v: FileAuxData) -> Self {
Entry {
total_size: v.total_size,
content_id: v.content_id,
content_sha1: v.sha1,
content_sha256: v.sha256,
}
}
}
impl Entry {
pub fn total_size(&self) -> u64 {
self.total_size
}
pub fn content_id(&self) -> ContentId {
self.content_id
}
pub fn content_sha1(&self) -> Sha1 {
self.content_sha1
}
pub fn content_sha256(&self) -> Sha256 {
self.content_sha256
}
/// Serialize the Entry to Bytes.
///
/// The serialization format is as follows:
/// - HgId <20 bytes>
/// - Version <1 byte> (for compatibility)
/// - content_id <32 bytes>
/// - content sha1 <20 bytes>
/// - content sha256 <32 bytes>
/// - total_size <u64 VLQ, 1-9 bytes>
fn serialize(&self, hgid: HgId) -> Result<Bytes> {
let mut buf = Vec::new();
buf.write_all(hgid.as_ref())?;
buf.write_u8(0)?; // write version
buf.write_all(self.content_id.as_ref())?;
buf.write_all(self.content_sha1.as_ref())?;
buf.write_all(self.content_sha256.as_ref())?;
buf.write_vlq(self.total_size)?;
Ok(buf.into())
}
fn deserialize(bytes: Bytes) -> Result<(HgId, Self)> {
let data: &[u8] = bytes.as_ref();
let mut cur = Cursor::new(data);
let hgid = cur.read_hgid()?;
let version = cur.read_u8()?;
if version!= 0 {
bail!("unsupported auxstore entry version {}", version);
}
let mut content_id = [0u8; 32];
cur.read_exact(&mut content_id)?;
let mut content_sha1 = [0u8; 20];
cur.read_exact(&mut content_sha1)?;
let mut content_sha256 = [0u8; 32];
cur.read_exact(&mut content_sha256)?;
let total_size: u64 = cur.read_vlq()?;
Ok((
hgid,
Entry {
content_id: content_id.into(),
content_sha1: content_sha1.into(),
content_sha256: content_sha256.into(),
total_size,
},
))
}
}
pub struct AuxStore(RwLock<Store>);
impl AuxStore {
pub fn new(path: impl AsRef<Path>, config: &ConfigSet, store_type: StoreType) -> Result<Self> {
// TODO(meyer): Eliminate "local" AuxStore - always treat it as shared / cache?
let open_options = AuxStore::open_options(config)?;
let log = match store_type {
StoreType::Local => open_options.local(&path),
StoreType::Shared => open_options.shared(&path),
}?;
Ok(AuxStore(RwLock::new(log)))
}
fn open_options(config: &ConfigSet) -> Result<StoreOpenOptions> {
// TODO(meyer): Decide exactly how we want to configure this store. This is all copied from indexedlogdatastore
// Default configuration: 4 x 2.5GB.
let mut open_options = StoreOpenOptions::new()
.max_log_count(4)
.max_bytes_per_log(2500 * 1000 * 1000)
.auto_sync_threshold(10 * 1024 * 1024)
.create(true)
.index("node", |_| {
vec![IndexOutput::Reference(0..HgId::len() as u64)]
});
if let Some(max_log_count) = config.get_opt::<u8>("indexedlog", "data.max-log-count")? {
open_options = open_options.max_log_count(max_log_count);
}
if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("indexedlog", "data.max-bytes-per-log")?
{
open_options = open_options.max_bytes_per_log(max_bytes_per_log.value());
} else if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("remotefilelog", "cachelimit")?
{
let log_count: u64 = open_options.max_log_count.unwrap_or(1).max(1).into();
open_options =
open_options.max_bytes_per_log((max_bytes_per_log.value() / log_count).max(1));
}
Ok(open_options)
}
pub fn get(&self, hgid: HgId) -> Result<Option<Entry>> {
let log = self.0.read();
let mut entries = log.lookup(0, &hgid)?;
let slice = match entries.next() {
None => return Ok(None),
Some(slice) => slice?,
};
let bytes = log.slice_to_bytes(slice);
drop(log);
Entry::deserialize(bytes).map(|(_hgid, entry)| Some(entry))
}
pub fn put(&self, hgid: HgId, entry: &Entry) -> Result<()> {
let serialized = entry.serialize(hgid)?;
self.0.write().append(&serialized)
}
pub fn flush(&self) -> Result<()> {
self.0.write().flush()
}
#[cfg(test)]
pub(crate) fn hgids(&self) -> Result<Vec<HgId>> {
let log = self.0.read();
log.iter()
.map(|slice| {
let bytes = log.slice_to_bytes(slice?);
Entry::deserialize(bytes).map(|(hgid, _entry)| hgid)
})
.collect()
}
}
#[cfg(test)]
mod tests {
use std::fs::remove_file;
use std::str::FromStr;
use std::sync::Arc;
use tempfile::TempDir;
use types::testutil::*;
use super::*;
use crate::scmstore::FileAttributes;
use crate::scmstore::FileStore;
use crate::testutil::*;
use crate::ExtStoredPolicy;
use crate::HgIdMutableDeltaStore;
use crate::IndexedLogHgIdDataStore;
fn single_byte_sha1(fst: u8) -> Sha1 {
let mut x: [u8; Sha1::len()] = Default::default();
x[0] = fst;
Sha1::from(x)
}
#[test]
fn | () -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
store.flush()?;
Ok(())
}
#[test]
fn test_add_get() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let found = store.get(k.hgid)?;
assert_eq!(Some(entry), found);
Ok(())
}
#[test]
fn test_lookup_failure() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let k2 = key("b", "2");
let found = store.get(k2.hgid)?;
assert_eq!(None, found);
Ok(())
}
#[test]
fn test_corrupted() -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "2");
let mut entry = Entry::default();
entry.total_size = 2;
entry.content_sha1 = single_byte_sha1(2);
store.put(k.hgid, &entry)?;
store.flush()?;
drop(store);
// Corrupt the log by removing the "log" file.
let mut rotate_log_path = tempdir.path().to_path_buf();
rotate_log_path.push("0");
rotate_log_path.push("log");
remove_file(rotate_log_path)?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "3");
let mut entry = Entry::default();
entry.total_size = 3;
entry.content_sha1 = single_byte_sha1(3);
store.put(k.hgid, &entry)?;
store.flush()?;
// There should be only one key in the store.
assert_eq!(store.hgids().into_iter().count(), 1);
Ok(())
}
#[test]
fn test_scmstore_read() -> Result<()> {
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
aux.put(k.hgid, &entry)?;
aux.flush()?;
// Set up local-only FileStore
let mut store = FileStore::empty();
store.aux_local = Some(aux.clone());
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(entry, fetched.aux_data().expect("no aux data found").into());
Ok(())
}
#[test]
fn test_scmstore_compute_read() -> Result<()> {
let k = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let d = delta("1234", None, k.clone());
let meta = Default::default();
// Setup local indexedlog
let tmp = TempDir::new()?;
let content = Arc::new(IndexedLogHgIdDataStore::new(
&tmp,
ExtStoredPolicy::Ignore,
&ConfigSet::new(),
StoreType::Shared,
)?);
content.add(&d, &meta).unwrap();
content.flush().unwrap();
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
// Set up local-only FileStore
let mut store = FileStore::empty();
store.cache_to_local_cache = true;
store.indexedlog_local = Some(content.clone());
store.aux_local = Some(aux.clone());
let mut expected = Entry::default();
expected.total_size = 4;
expected.content_id = ContentId::from_str(
"aa6ab85da77ca480b7624172fe44aa9906b6c3f00f06ff23c3e5f60bfd0c414e",
)?;
expected.content_sha1 = Sha1::from_str("7110eda4d09e062aa5e4a390b0a572ac0d2c0220")?;
expected.content_sha256 =
Sha256::from_str("03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4")?;
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(
expected,
fetched.aux_data().expect("no aux data found").into()
);
// Verify we can read it directly too
let found = aux.get(k.hgid)?;
assert_eq!(Some(expected), found);
Ok(())
}
}
| test_empty | identifier_name |
indexedlogauxstore.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Cursor;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use anyhow::bail;
use anyhow::Result;
use byteorder::ReadBytesExt;
use byteorder::WriteBytesExt;
use configparser::config::ConfigSet;
use configparser::convert::ByteCount;
use edenapi_types::ContentId;
use edenapi_types::FileAuxData;
use edenapi_types::Sha1;
use edenapi_types::Sha256;
use indexedlog::log::IndexOutput;
use minibytes::Bytes;
use parking_lot::RwLock;
use types::hgid::ReadHgIdExt;
use types::HgId;
use vlqencoding::VLQDecode;
use vlqencoding::VLQEncode;
use crate::indexedlogutil::Store;
use crate::indexedlogutil::StoreOpenOptions;
use crate::indexedlogutil::StoreType;
/// See edenapi_types::FileAuxData and mononoke_types::ContentMetadata
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Entry {
pub(crate) total_size: u64,
pub(crate) content_id: ContentId,
pub(crate) content_sha1: Sha1,
pub(crate) content_sha256: Sha256,
}
impl From<FileAuxData> for Entry {
fn from(v: FileAuxData) -> Self {
Entry {
total_size: v.total_size,
content_id: v.content_id,
content_sha1: v.sha1,
content_sha256: v.sha256,
}
}
}
impl Entry {
pub fn total_size(&self) -> u64 {
self.total_size
}
pub fn content_id(&self) -> ContentId {
self.content_id
}
pub fn content_sha1(&self) -> Sha1 {
self.content_sha1
}
pub fn content_sha256(&self) -> Sha256 {
self.content_sha256
}
/// Serialize the Entry to Bytes.
///
/// The serialization format is as follows:
/// - HgId <20 bytes>
/// - Version <1 byte> (for compatibility)
/// - content_id <32 bytes>
/// - content sha1 <20 bytes>
/// - content sha256 <32 bytes>
/// - total_size <u64 VLQ, 1-9 bytes>
fn serialize(&self, hgid: HgId) -> Result<Bytes> |
fn deserialize(bytes: Bytes) -> Result<(HgId, Self)> {
let data: &[u8] = bytes.as_ref();
let mut cur = Cursor::new(data);
let hgid = cur.read_hgid()?;
let version = cur.read_u8()?;
if version!= 0 {
bail!("unsupported auxstore entry version {}", version);
}
let mut content_id = [0u8; 32];
cur.read_exact(&mut content_id)?;
let mut content_sha1 = [0u8; 20];
cur.read_exact(&mut content_sha1)?;
let mut content_sha256 = [0u8; 32];
cur.read_exact(&mut content_sha256)?;
let total_size: u64 = cur.read_vlq()?;
Ok((
hgid,
Entry {
content_id: content_id.into(),
content_sha1: content_sha1.into(),
content_sha256: content_sha256.into(),
total_size,
},
))
}
}
pub struct AuxStore(RwLock<Store>);
impl AuxStore {
pub fn new(path: impl AsRef<Path>, config: &ConfigSet, store_type: StoreType) -> Result<Self> {
// TODO(meyer): Eliminate "local" AuxStore - always treat it as shared / cache?
let open_options = AuxStore::open_options(config)?;
let log = match store_type {
StoreType::Local => open_options.local(&path),
StoreType::Shared => open_options.shared(&path),
}?;
Ok(AuxStore(RwLock::new(log)))
}
fn open_options(config: &ConfigSet) -> Result<StoreOpenOptions> {
// TODO(meyer): Decide exactly how we want to configure this store. This is all copied from indexedlogdatastore
// Default configuration: 4 x 2.5GB.
let mut open_options = StoreOpenOptions::new()
.max_log_count(4)
.max_bytes_per_log(2500 * 1000 * 1000)
.auto_sync_threshold(10 * 1024 * 1024)
.create(true)
.index("node", |_| {
vec![IndexOutput::Reference(0..HgId::len() as u64)]
});
if let Some(max_log_count) = config.get_opt::<u8>("indexedlog", "data.max-log-count")? {
open_options = open_options.max_log_count(max_log_count);
}
if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("indexedlog", "data.max-bytes-per-log")?
{
open_options = open_options.max_bytes_per_log(max_bytes_per_log.value());
} else if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("remotefilelog", "cachelimit")?
{
let log_count: u64 = open_options.max_log_count.unwrap_or(1).max(1).into();
open_options =
open_options.max_bytes_per_log((max_bytes_per_log.value() / log_count).max(1));
}
Ok(open_options)
}
pub fn get(&self, hgid: HgId) -> Result<Option<Entry>> {
let log = self.0.read();
let mut entries = log.lookup(0, &hgid)?;
let slice = match entries.next() {
None => return Ok(None),
Some(slice) => slice?,
};
let bytes = log.slice_to_bytes(slice);
drop(log);
Entry::deserialize(bytes).map(|(_hgid, entry)| Some(entry))
}
pub fn put(&self, hgid: HgId, entry: &Entry) -> Result<()> {
let serialized = entry.serialize(hgid)?;
self.0.write().append(&serialized)
}
pub fn flush(&self) -> Result<()> {
self.0.write().flush()
}
#[cfg(test)]
pub(crate) fn hgids(&self) -> Result<Vec<HgId>> {
let log = self.0.read();
log.iter()
.map(|slice| {
let bytes = log.slice_to_bytes(slice?);
Entry::deserialize(bytes).map(|(hgid, _entry)| hgid)
})
.collect()
}
}
#[cfg(test)]
mod tests {
use std::fs::remove_file;
use std::str::FromStr;
use std::sync::Arc;
use tempfile::TempDir;
use types::testutil::*;
use super::*;
use crate::scmstore::FileAttributes;
use crate::scmstore::FileStore;
use crate::testutil::*;
use crate::ExtStoredPolicy;
use crate::HgIdMutableDeltaStore;
use crate::IndexedLogHgIdDataStore;
fn single_byte_sha1(fst: u8) -> Sha1 {
let mut x: [u8; Sha1::len()] = Default::default();
x[0] = fst;
Sha1::from(x)
}
#[test]
fn test_empty() -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
store.flush()?;
Ok(())
}
#[test]
fn test_add_get() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let found = store.get(k.hgid)?;
assert_eq!(Some(entry), found);
Ok(())
}
#[test]
fn test_lookup_failure() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let k2 = key("b", "2");
let found = store.get(k2.hgid)?;
assert_eq!(None, found);
Ok(())
}
#[test]
fn test_corrupted() -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "2");
let mut entry = Entry::default();
entry.total_size = 2;
entry.content_sha1 = single_byte_sha1(2);
store.put(k.hgid, &entry)?;
store.flush()?;
drop(store);
// Corrupt the log by removing the "log" file.
let mut rotate_log_path = tempdir.path().to_path_buf();
rotate_log_path.push("0");
rotate_log_path.push("log");
remove_file(rotate_log_path)?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "3");
let mut entry = Entry::default();
entry.total_size = 3;
entry.content_sha1 = single_byte_sha1(3);
store.put(k.hgid, &entry)?;
store.flush()?;
// There should be only one key in the store.
assert_eq!(store.hgids().into_iter().count(), 1);
Ok(())
}
#[test]
fn test_scmstore_read() -> Result<()> {
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
aux.put(k.hgid, &entry)?;
aux.flush()?;
// Set up local-only FileStore
let mut store = FileStore::empty();
store.aux_local = Some(aux.clone());
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(entry, fetched.aux_data().expect("no aux data found").into());
Ok(())
}
#[test]
fn test_scmstore_compute_read() -> Result<()> {
let k = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let d = delta("1234", None, k.clone());
let meta = Default::default();
// Setup local indexedlog
let tmp = TempDir::new()?;
let content = Arc::new(IndexedLogHgIdDataStore::new(
&tmp,
ExtStoredPolicy::Ignore,
&ConfigSet::new(),
StoreType::Shared,
)?);
content.add(&d, &meta).unwrap();
content.flush().unwrap();
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
// Set up local-only FileStore
let mut store = FileStore::empty();
store.cache_to_local_cache = true;
store.indexedlog_local = Some(content.clone());
store.aux_local = Some(aux.clone());
let mut expected = Entry::default();
expected.total_size = 4;
expected.content_id = ContentId::from_str(
"aa6ab85da77ca480b7624172fe44aa9906b6c3f00f06ff23c3e5f60bfd0c414e",
)?;
expected.content_sha1 = Sha1::from_str("7110eda4d09e062aa5e4a390b0a572ac0d2c0220")?;
expected.content_sha256 =
Sha256::from_str("03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4")?;
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(
expected,
fetched.aux_data().expect("no aux data found").into()
);
// Verify we can read it directly too
let found = aux.get(k.hgid)?;
assert_eq!(Some(expected), found);
Ok(())
}
}
| {
let mut buf = Vec::new();
buf.write_all(hgid.as_ref())?;
buf.write_u8(0)?; // write version
buf.write_all(self.content_id.as_ref())?;
buf.write_all(self.content_sha1.as_ref())?;
buf.write_all(self.content_sha256.as_ref())?;
buf.write_vlq(self.total_size)?;
Ok(buf.into())
} | identifier_body |
indexedlogauxstore.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Cursor;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use anyhow::bail;
use anyhow::Result;
use byteorder::ReadBytesExt;
use byteorder::WriteBytesExt;
use configparser::config::ConfigSet;
use configparser::convert::ByteCount;
use edenapi_types::ContentId;
use edenapi_types::FileAuxData;
use edenapi_types::Sha1;
use edenapi_types::Sha256;
use indexedlog::log::IndexOutput;
use minibytes::Bytes;
use parking_lot::RwLock;
use types::hgid::ReadHgIdExt;
use types::HgId;
use vlqencoding::VLQDecode;
use vlqencoding::VLQEncode;
use crate::indexedlogutil::Store;
use crate::indexedlogutil::StoreOpenOptions;
use crate::indexedlogutil::StoreType;
/// See edenapi_types::FileAuxData and mononoke_types::ContentMetadata
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Entry {
pub(crate) total_size: u64,
pub(crate) content_id: ContentId,
pub(crate) content_sha1: Sha1,
pub(crate) content_sha256: Sha256,
}
impl From<FileAuxData> for Entry {
fn from(v: FileAuxData) -> Self {
Entry {
total_size: v.total_size,
content_id: v.content_id,
content_sha1: v.sha1,
content_sha256: v.sha256,
}
}
}
impl Entry {
pub fn total_size(&self) -> u64 {
self.total_size
}
pub fn content_id(&self) -> ContentId {
self.content_id
}
pub fn content_sha1(&self) -> Sha1 {
self.content_sha1
}
pub fn content_sha256(&self) -> Sha256 {
self.content_sha256
}
/// Serialize the Entry to Bytes.
///
/// The serialization format is as follows:
/// - HgId <20 bytes>
/// - Version <1 byte> (for compatibility)
/// - content_id <32 bytes>
/// - content sha1 <20 bytes>
/// - content sha256 <32 bytes>
/// - total_size <u64 VLQ, 1-9 bytes>
fn serialize(&self, hgid: HgId) -> Result<Bytes> {
let mut buf = Vec::new();
buf.write_all(hgid.as_ref())?;
buf.write_u8(0)?; // write version
buf.write_all(self.content_id.as_ref())?;
buf.write_all(self.content_sha1.as_ref())?;
buf.write_all(self.content_sha256.as_ref())?;
buf.write_vlq(self.total_size)?;
Ok(buf.into())
}
fn deserialize(bytes: Bytes) -> Result<(HgId, Self)> {
let data: &[u8] = bytes.as_ref();
let mut cur = Cursor::new(data);
let hgid = cur.read_hgid()?;
let version = cur.read_u8()?;
if version!= 0 {
bail!("unsupported auxstore entry version {}", version);
}
let mut content_id = [0u8; 32];
cur.read_exact(&mut content_id)?;
let mut content_sha1 = [0u8; 20];
cur.read_exact(&mut content_sha1)?;
let mut content_sha256 = [0u8; 32];
cur.read_exact(&mut content_sha256)?;
let total_size: u64 = cur.read_vlq()?;
Ok((
hgid,
Entry {
content_id: content_id.into(),
content_sha1: content_sha1.into(),
content_sha256: content_sha256.into(),
total_size,
},
))
}
}
pub struct AuxStore(RwLock<Store>);
impl AuxStore {
pub fn new(path: impl AsRef<Path>, config: &ConfigSet, store_type: StoreType) -> Result<Self> {
// TODO(meyer): Eliminate "local" AuxStore - always treat it as shared / cache?
let open_options = AuxStore::open_options(config)?;
let log = match store_type {
StoreType::Local => open_options.local(&path),
StoreType::Shared => open_options.shared(&path),
}?;
Ok(AuxStore(RwLock::new(log)))
}
fn open_options(config: &ConfigSet) -> Result<StoreOpenOptions> {
// TODO(meyer): Decide exactly how we want to configure this store. This is all copied from indexedlogdatastore
// Default configuration: 4 x 2.5GB.
let mut open_options = StoreOpenOptions::new()
.max_log_count(4)
.max_bytes_per_log(2500 * 1000 * 1000)
.auto_sync_threshold(10 * 1024 * 1024)
.create(true)
.index("node", |_| {
vec![IndexOutput::Reference(0..HgId::len() as u64)]
});
if let Some(max_log_count) = config.get_opt::<u8>("indexedlog", "data.max-log-count")? {
open_options = open_options.max_log_count(max_log_count);
}
if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("indexedlog", "data.max-bytes-per-log")?
{
open_options = open_options.max_bytes_per_log(max_bytes_per_log.value());
} else if let Some(max_bytes_per_log) =
config.get_opt::<ByteCount>("remotefilelog", "cachelimit")?
{
let log_count: u64 = open_options.max_log_count.unwrap_or(1).max(1).into();
open_options =
open_options.max_bytes_per_log((max_bytes_per_log.value() / log_count).max(1));
}
Ok(open_options)
}
pub fn get(&self, hgid: HgId) -> Result<Option<Entry>> {
let log = self.0.read();
let mut entries = log.lookup(0, &hgid)?;
let slice = match entries.next() {
None => return Ok(None),
Some(slice) => slice?,
};
let bytes = log.slice_to_bytes(slice);
drop(log);
Entry::deserialize(bytes).map(|(_hgid, entry)| Some(entry))
}
pub fn put(&self, hgid: HgId, entry: &Entry) -> Result<()> {
let serialized = entry.serialize(hgid)?;
self.0.write().append(&serialized)
}
pub fn flush(&self) -> Result<()> {
self.0.write().flush()
}
#[cfg(test)]
pub(crate) fn hgids(&self) -> Result<Vec<HgId>> {
let log = self.0.read();
log.iter()
.map(|slice| {
let bytes = log.slice_to_bytes(slice?);
Entry::deserialize(bytes).map(|(hgid, _entry)| hgid)
})
.collect()
}
}
#[cfg(test)]
mod tests {
use std::fs::remove_file;
use std::str::FromStr;
use std::sync::Arc;
use tempfile::TempDir; | use types::testutil::*;
use super::*;
use crate::scmstore::FileAttributes;
use crate::scmstore::FileStore;
use crate::testutil::*;
use crate::ExtStoredPolicy;
use crate::HgIdMutableDeltaStore;
use crate::IndexedLogHgIdDataStore;
fn single_byte_sha1(fst: u8) -> Sha1 {
let mut x: [u8; Sha1::len()] = Default::default();
x[0] = fst;
Sha1::from(x)
}
#[test]
fn test_empty() -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
store.flush()?;
Ok(())
}
#[test]
fn test_add_get() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let found = store.get(k.hgid)?;
assert_eq!(Some(entry), found);
Ok(())
}
#[test]
fn test_lookup_failure() -> Result<()> {
let tempdir = TempDir::new().unwrap();
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
store.put(k.hgid, &entry)?;
store.flush()?;
let k2 = key("b", "2");
let found = store.get(k2.hgid)?;
assert_eq!(None, found);
Ok(())
}
#[test]
fn test_corrupted() -> Result<()> {
let tempdir = TempDir::new()?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "2");
let mut entry = Entry::default();
entry.total_size = 2;
entry.content_sha1 = single_byte_sha1(2);
store.put(k.hgid, &entry)?;
store.flush()?;
drop(store);
// Corrupt the log by removing the "log" file.
let mut rotate_log_path = tempdir.path().to_path_buf();
rotate_log_path.push("0");
rotate_log_path.push("log");
remove_file(rotate_log_path)?;
let store = AuxStore::new(&tempdir, &ConfigSet::new(), StoreType::Shared)?;
let k = key("a", "3");
let mut entry = Entry::default();
entry.total_size = 3;
entry.content_sha1 = single_byte_sha1(3);
store.put(k.hgid, &entry)?;
store.flush()?;
// There should be only one key in the store.
assert_eq!(store.hgids().into_iter().count(), 1);
Ok(())
}
#[test]
fn test_scmstore_read() -> Result<()> {
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
let mut entry = Entry::default();
entry.total_size = 1;
entry.content_sha1 = single_byte_sha1(1);
let k = key("a", "1");
aux.put(k.hgid, &entry)?;
aux.flush()?;
// Set up local-only FileStore
let mut store = FileStore::empty();
store.aux_local = Some(aux.clone());
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(entry, fetched.aux_data().expect("no aux data found").into());
Ok(())
}
#[test]
fn test_scmstore_compute_read() -> Result<()> {
let k = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let d = delta("1234", None, k.clone());
let meta = Default::default();
// Setup local indexedlog
let tmp = TempDir::new()?;
let content = Arc::new(IndexedLogHgIdDataStore::new(
&tmp,
ExtStoredPolicy::Ignore,
&ConfigSet::new(),
StoreType::Shared,
)?);
content.add(&d, &meta).unwrap();
content.flush().unwrap();
let tmp = TempDir::new()?;
let aux = Arc::new(AuxStore::new(&tmp, &ConfigSet::new(), StoreType::Shared)?);
// Set up local-only FileStore
let mut store = FileStore::empty();
store.cache_to_local_cache = true;
store.indexedlog_local = Some(content.clone());
store.aux_local = Some(aux.clone());
let mut expected = Entry::default();
expected.total_size = 4;
expected.content_id = ContentId::from_str(
"aa6ab85da77ca480b7624172fe44aa9906b6c3f00f06ff23c3e5f60bfd0c414e",
)?;
expected.content_sha1 = Sha1::from_str("7110eda4d09e062aa5e4a390b0a572ac0d2c0220")?;
expected.content_sha256 =
Sha256::from_str("03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4")?;
// Attempt fetch.
let fetched = store
.fetch(std::iter::once(k.clone()), FileAttributes::AUX)
.single()?
.expect("key not found");
assert_eq!(
expected,
fetched.aux_data().expect("no aux data found").into()
);
// Verify we can read it directly too
let found = aux.get(k.hgid)?;
assert_eq!(Some(expected), found);
Ok(())
}
} | random_line_split |
|
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![plugin(plugins)]
#![deny(unsafe_code)]
#![plugin(serde_macros)]
extern crate app_units;
extern crate azure;
extern crate canvas;
extern crate canvas_traits;
extern crate clipboard;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(target_os = "macos")]
extern crate core_text;
extern crate devtools_traits;
extern crate euclid;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gfx;
extern crate gfx_traits;
extern crate gleam;
extern crate image;
extern crate ipc_channel;
extern crate layers;
extern crate layout_traits;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate num;
extern crate offscreen_gl_context;
#[macro_use]
extern crate profile_traits;
extern crate rand;
extern crate script_traits;
extern crate serde;
extern crate style_traits;
extern crate time;
extern crate url;
#[macro_use]
extern crate util;
extern crate webrender;
extern crate webrender_traits;
pub use compositor_thread::{CompositorEventListener, CompositorProxy, CompositorThread};
pub use constellation::Constellation;
use euclid::size::{Size2D};
use gfx_traits::Epoch;
use ipc_channel::ipc::{IpcSender};
use msg::constellation_msg::{FrameId, Key, KeyState, KeyModifiers, LoadData};
use msg::constellation_msg::{NavigationDirection, PipelineId, SubpageId};
use msg::constellation_msg::{WebDriverCommandMsg, WindowSizeData};
use std::collections::HashMap;
use url::Url;
mod compositor;
mod compositor_layer;
pub mod compositor_thread;
pub mod constellation;
mod delayed_composition;
pub mod pipeline;
#[cfg(not(target_os = "windows"))]
pub mod sandboxing;
mod surface_map;
mod timer_scheduler;
mod touch;
pub mod windowing;
/// Specifies whether the script or layout thread needs to be ticked for animation.
#[derive(Deserialize, Serialize)]
pub enum | {
Script,
Layout,
}
/// Messages from the compositor to the constellation.
#[derive(Deserialize, Serialize)]
pub enum CompositorMsg {
Exit,
FrameSize(PipelineId, Size2D<f32>),
/// Request that the constellation send the FrameId corresponding to the document
/// with the provided pipeline id
GetFrame(PipelineId, IpcSender<Option<FrameId>>),
/// Request that the constellation send the current pipeline id for the provided frame
/// id, or for the root frame if this is None, over a provided channel
GetPipeline(Option<FrameId>, IpcSender<Option<PipelineId>>),
/// Requests that the constellation inform the compositor of the title of the pipeline
/// immediately.
GetPipelineTitle(PipelineId),
InitLoadUrl(Url),
/// Query the constellation to see if the current compositor output is stable
IsReadyToSaveImage(HashMap<PipelineId, Epoch>),
KeyEvent(Key, KeyState, KeyModifiers),
LoadUrl(PipelineId, LoadData),
Navigate(Option<(PipelineId, SubpageId)>, NavigationDirection),
ResizedWindow(WindowSizeData),
/// Requests that the constellation instruct layout to begin a new tick of the animation.
TickAnimation(PipelineId, AnimationTickType),
/// Dispatch a webdriver command
WebDriverCommand(WebDriverCommandMsg),
}
| AnimationTickType | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![plugin(plugins)]
#![deny(unsafe_code)]
#![plugin(serde_macros)]
extern crate app_units;
extern crate azure;
extern crate canvas;
extern crate canvas_traits;
extern crate clipboard;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(target_os = "macos")]
extern crate core_text;
extern crate devtools_traits;
extern crate euclid;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gfx;
extern crate gfx_traits;
extern crate gleam;
extern crate image; | extern crate layout_traits;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate num;
extern crate offscreen_gl_context;
#[macro_use]
extern crate profile_traits;
extern crate rand;
extern crate script_traits;
extern crate serde;
extern crate style_traits;
extern crate time;
extern crate url;
#[macro_use]
extern crate util;
extern crate webrender;
extern crate webrender_traits;
pub use compositor_thread::{CompositorEventListener, CompositorProxy, CompositorThread};
pub use constellation::Constellation;
use euclid::size::{Size2D};
use gfx_traits::Epoch;
use ipc_channel::ipc::{IpcSender};
use msg::constellation_msg::{FrameId, Key, KeyState, KeyModifiers, LoadData};
use msg::constellation_msg::{NavigationDirection, PipelineId, SubpageId};
use msg::constellation_msg::{WebDriverCommandMsg, WindowSizeData};
use std::collections::HashMap;
use url::Url;
mod compositor;
mod compositor_layer;
pub mod compositor_thread;
pub mod constellation;
mod delayed_composition;
pub mod pipeline;
#[cfg(not(target_os = "windows"))]
pub mod sandboxing;
mod surface_map;
mod timer_scheduler;
mod touch;
pub mod windowing;
/// Specifies whether the script or layout thread needs to be ticked for animation.
#[derive(Deserialize, Serialize)]
pub enum AnimationTickType {
Script,
Layout,
}
/// Messages from the compositor to the constellation.
#[derive(Deserialize, Serialize)]
pub enum CompositorMsg {
Exit,
FrameSize(PipelineId, Size2D<f32>),
/// Request that the constellation send the FrameId corresponding to the document
/// with the provided pipeline id
GetFrame(PipelineId, IpcSender<Option<FrameId>>),
/// Request that the constellation send the current pipeline id for the provided frame
/// id, or for the root frame if this is None, over a provided channel
GetPipeline(Option<FrameId>, IpcSender<Option<PipelineId>>),
/// Requests that the constellation inform the compositor of the title of the pipeline
/// immediately.
GetPipelineTitle(PipelineId),
InitLoadUrl(Url),
/// Query the constellation to see if the current compositor output is stable
IsReadyToSaveImage(HashMap<PipelineId, Epoch>),
KeyEvent(Key, KeyState, KeyModifiers),
LoadUrl(PipelineId, LoadData),
Navigate(Option<(PipelineId, SubpageId)>, NavigationDirection),
ResizedWindow(WindowSizeData),
/// Requests that the constellation instruct layout to begin a new tick of the animation.
TickAnimation(PipelineId, AnimationTickType),
/// Dispatch a webdriver command
WebDriverCommand(WebDriverCommandMsg),
} | extern crate ipc_channel;
extern crate layers; | random_line_split |
main.rs | /*
Full Monkey.
Generic preprocessor tool.
Copyright 2016 Sam Saint-Pettersen.
Released under the MIT/X11 License.
*/
extern crate clioptions;
extern crate regex;
use clioptions::CliOptions;
use regex::Regex;
use std::io::{BufRead, BufReader, Write};
use std::fs::File;
use std::process::exit;
fn preprocess(input: &str, output: &str, conditions: &str, verbose: bool) {
let mut loc: Vec<String> = Vec::new();
let mut preprocessed: Vec<String> = Vec::new();
let mut cond = String::new();
let mut set_conds: Vec<String> = Vec::new();
let mut prefixed: Vec<String> = Vec::new();
let mut prefixes: Vec<String> = Vec::new();
let mut in_pp = false;
let conditions = conditions.split(",");
for sc in conditions {
set_conds.push(sc.to_string());
}
let f = File::open(input).unwrap();
let file = BufReader::new(&f);
for l in file.lines() {
let l = l.unwrap();
// Process prefix...
let mut p = Regex::new("#prefix (.*) with (.*)").unwrap();
if p.is_match(&l) {
for cap in p.captures_iter(&l) {
prefixed.push(cap.at(1).unwrap().to_string());
prefixes.push(cap.at(2).unwrap().to_string());
}
continue;
}
// Process conditional (if/else/elseif)...
p = Regex::new("#[else]*[if]* (.*)").unwrap();
if p.is_match(&l) {
for cap in p.captures_iter(&l) {
cond = cap.at(1).unwrap().to_string();
in_pp = true;
}
continue;
}
// Process end block...
p = Regex::new("#[fi|endif]").unwrap();
if p.is_match(&l) {
in_pp = false;
continue;
}
// Push relevant LoC to vector...
for sc in set_conds.clone() {
if in_pp && cond == sc {
preprocessed.push(l.to_string());
}
}
if!in_pp {
preprocessed.push(l.to_string());
continue;
}
}
// Do any alterations:
for line in preprocessed {
let mut fl = line;
for (i, p) in prefixed.iter().enumerate() {
let r = Regex::new(®ex::quote(&p)).unwrap();
let repl = format!("{}{}", &prefixes[i], &p);
fl = r.replace_all(&fl, &repl[..]);
}
loc.push(fl);
}
loc.push(String::new());
if verbose {
println!("Preprocessing {} --> {}", input, output);
}
let mut w = File::create(output).unwrap();
let _ = w.write_all(loc.join("\n").as_bytes());
}
fn display_version() {
println!("Full Monkey v. 0.1");
println!(r" __");
println!(r"w c(..)o (");
println!(r" \__(-) __)");
println!(r" /\ (");
println!(r" /(_)___)");
println!(r" w /|");
println!(r" | \");
println!(r" m m");
println!("\nMonkey appears courtesy of ejm97:");
println!("http://www.ascii-art.de/ascii/mno/monkey.txt");
exit(0);
}
fn display_error(program: &str, error: &str) {
println!("Error: {}.", error);
display_usage(&program, -1);
}
fn | (program: &str, exit_code: i32) {
println!("\nFull Monkey.");
println!("Generic preprocessor tool.");
println!("\nCopyright 2016 Sam Saint-Pettersen.");
println!("Released under the MIT/X11 License.");
println!("\n{} -f|--file <input> [-c|--condition <condition(s)>] -o|--out <output>", program);
println!("[-l|--verbose][-h|--help | -v|--version]");
println!("\n-f|--file: File to run preprocessing on.");
println!("-c|--conditon: Comma delimited list of conditon(s) to apply.");
println!("-o|--out: File to output preprocessed LoC to.");
println!("-l|--verbose: Display message to console on process.");
println!("-h|--help: Display this help information and exit.");
println!("-v|--version: Display program version and exit.");
exit(exit_code);
}
fn main() {
let cli = CliOptions::new("fm");
let program = cli.get_program();
let mut input = String::new();
let mut output = String::new();
let mut conditions = String::new();
let mut verbose = false;
if cli.get_num() > 1 {
for (i, a) in cli.get_args().iter().enumerate() {
match a.trim() {
"-h" | "--help" => display_usage(&program, 0),
"-v" | "--version" => display_version(),
"-f" | "--file" => input = cli.next_argument(i),
"-c" | "--condition" => conditions = cli.next_argument(i),
"-o" | "--out" => output = cli.next_argument(i),
"-l" | "--verbose" => verbose = true,
_ => continue,
}
}
if input.is_empty() {
display_error(&program, "No input specified");
}
if output.is_empty() {
display_error(&program, "No output specified");
}
preprocess(&input, &output, &conditions, verbose);
}
else {
display_error(&program, "No options specified");
}
}
| display_usage | identifier_name |
main.rs | /*
Full Monkey.
Generic preprocessor tool.
Copyright 2016 Sam Saint-Pettersen.
Released under the MIT/X11 License.
*/
extern crate clioptions;
extern crate regex;
use clioptions::CliOptions;
use regex::Regex;
use std::io::{BufRead, BufReader, Write};
use std::fs::File;
use std::process::exit;
fn preprocess(input: &str, output: &str, conditions: &str, verbose: bool) | let mut p = Regex::new("#prefix (.*) with (.*)").unwrap();
if p.is_match(&l) {
for cap in p.captures_iter(&l) {
prefixed.push(cap.at(1).unwrap().to_string());
prefixes.push(cap.at(2).unwrap().to_string());
}
continue;
}
// Process conditional (if/else/elseif)...
p = Regex::new("#[else]*[if]* (.*)").unwrap();
if p.is_match(&l) {
for cap in p.captures_iter(&l) {
cond = cap.at(1).unwrap().to_string();
in_pp = true;
}
continue;
}
// Process end block...
p = Regex::new("#[fi|endif]").unwrap();
if p.is_match(&l) {
in_pp = false;
continue;
}
// Push relevant LoC to vector...
for sc in set_conds.clone() {
if in_pp && cond == sc {
preprocessed.push(l.to_string());
}
}
if!in_pp {
preprocessed.push(l.to_string());
continue;
}
}
// Do any alterations:
for line in preprocessed {
let mut fl = line;
for (i, p) in prefixed.iter().enumerate() {
let r = Regex::new(®ex::quote(&p)).unwrap();
let repl = format!("{}{}", &prefixes[i], &p);
fl = r.replace_all(&fl, &repl[..]);
}
loc.push(fl);
}
loc.push(String::new());
if verbose {
println!("Preprocessing {} --> {}", input, output);
}
let mut w = File::create(output).unwrap();
let _ = w.write_all(loc.join("\n").as_bytes());
}
fn display_version() {
println!("Full Monkey v. 0.1");
println!(r" __");
println!(r"w c(..)o (");
println!(r" \__(-) __)");
println!(r" /\ (");
println!(r" /(_)___)");
println!(r" w /|");
println!(r" | \");
println!(r" m m");
println!("\nMonkey appears courtesy of ejm97:");
println!("http://www.ascii-art.de/ascii/mno/monkey.txt");
exit(0);
}
fn display_error(program: &str, error: &str) {
println!("Error: {}.", error);
display_usage(&program, -1);
}
fn display_usage(program: &str, exit_code: i32) {
println!("\nFull Monkey.");
println!("Generic preprocessor tool.");
println!("\nCopyright 2016 Sam Saint-Pettersen.");
println!("Released under the MIT/X11 License.");
println!("\n{} -f|--file <input> [-c|--condition <condition(s)>] -o|--out <output>", program);
println!("[-l|--verbose][-h|--help | -v|--version]");
println!("\n-f|--file: File to run preprocessing on.");
println!("-c|--conditon: Comma delimited list of conditon(s) to apply.");
println!("-o|--out: File to output preprocessed LoC to.");
println!("-l|--verbose: Display message to console on process.");
println!("-h|--help: Display this help information and exit.");
println!("-v|--version: Display program version and exit.");
exit(exit_code);
}
fn main() {
let cli = CliOptions::new("fm");
let program = cli.get_program();
let mut input = String::new();
let mut output = String::new();
let mut conditions = String::new();
let mut verbose = false;
if cli.get_num() > 1 {
for (i, a) in cli.get_args().iter().enumerate() {
match a.trim() {
"-h" | "--help" => display_usage(&program, 0),
"-v" | "--version" => display_version(),
"-f" | "--file" => input = cli.next_argument(i),
"-c" | "--condition" => conditions = cli.next_argument(i),
"-o" | "--out" => output = cli.next_argument(i),
"-l" | "--verbose" => verbose = true,
_ => continue,
}
}
if input.is_empty() {
display_error(&program, "No input specified");
}
if output.is_empty() {
display_error(&program, "No output specified");
}
preprocess(&input, &output, &conditions, verbose);
}
else {
display_error(&program, "No options specified");
}
}
| {
let mut loc: Vec<String> = Vec::new();
let mut preprocessed: Vec<String> = Vec::new();
let mut cond = String::new();
let mut set_conds: Vec<String> = Vec::new();
let mut prefixed: Vec<String> = Vec::new();
let mut prefixes: Vec<String> = Vec::new();
let mut in_pp = false;
let conditions = conditions.split(",");
for sc in conditions {
set_conds.push(sc.to_string());
}
let f = File::open(input).unwrap();
let file = BufReader::new(&f);
for l in file.lines() {
let l = l.unwrap();
// Process prefix... | identifier_body |
main.rs | /*
Full Monkey.
Generic preprocessor tool.
Copyright 2016 Sam Saint-Pettersen.
Released under the MIT/X11 License.
*/
extern crate clioptions;
extern crate regex;
use clioptions::CliOptions;
use regex::Regex;
use std::io::{BufRead, BufReader, Write};
use std::fs::File;
use std::process::exit;
fn preprocess(input: &str, output: &str, conditions: &str, verbose: bool) {
let mut loc: Vec<String> = Vec::new();
let mut preprocessed: Vec<String> = Vec::new();
let mut cond = String::new();
let mut set_conds: Vec<String> = Vec::new();
let mut prefixed: Vec<String> = Vec::new();
let mut prefixes: Vec<String> = Vec::new();
let mut in_pp = false;
let conditions = conditions.split(",");
for sc in conditions {
set_conds.push(sc.to_string());
}
let f = File::open(input).unwrap();
let file = BufReader::new(&f);
for l in file.lines() {
let l = l.unwrap();
// Process prefix...
let mut p = Regex::new("#prefix (.*) with (.*)").unwrap();
if p.is_match(&l) {
for cap in p.captures_iter(&l) {
prefixed.push(cap.at(1).unwrap().to_string());
prefixes.push(cap.at(2).unwrap().to_string());
}
continue;
}
// Process conditional (if/else/elseif)...
p = Regex::new("#[else]*[if]* (.*)").unwrap();
if p.is_match(&l) {
for cap in p.captures_iter(&l) {
cond = cap.at(1).unwrap().to_string();
in_pp = true;
}
continue;
}
// Process end block...
p = Regex::new("#[fi|endif]").unwrap();
if p.is_match(&l) {
in_pp = false;
continue;
}
// Push relevant LoC to vector...
for sc in set_conds.clone() {
if in_pp && cond == sc {
preprocessed.push(l.to_string());
} | preprocessed.push(l.to_string());
continue;
}
}
// Do any alterations:
for line in preprocessed {
let mut fl = line;
for (i, p) in prefixed.iter().enumerate() {
let r = Regex::new(®ex::quote(&p)).unwrap();
let repl = format!("{}{}", &prefixes[i], &p);
fl = r.replace_all(&fl, &repl[..]);
}
loc.push(fl);
}
loc.push(String::new());
if verbose {
println!("Preprocessing {} --> {}", input, output);
}
let mut w = File::create(output).unwrap();
let _ = w.write_all(loc.join("\n").as_bytes());
}
fn display_version() {
println!("Full Monkey v. 0.1");
println!(r" __");
println!(r"w c(..)o (");
println!(r" \__(-) __)");
println!(r" /\ (");
println!(r" /(_)___)");
println!(r" w /|");
println!(r" | \");
println!(r" m m");
println!("\nMonkey appears courtesy of ejm97:");
println!("http://www.ascii-art.de/ascii/mno/monkey.txt");
exit(0);
}
fn display_error(program: &str, error: &str) {
println!("Error: {}.", error);
display_usage(&program, -1);
}
fn display_usage(program: &str, exit_code: i32) {
println!("\nFull Monkey.");
println!("Generic preprocessor tool.");
println!("\nCopyright 2016 Sam Saint-Pettersen.");
println!("Released under the MIT/X11 License.");
println!("\n{} -f|--file <input> [-c|--condition <condition(s)>] -o|--out <output>", program);
println!("[-l|--verbose][-h|--help | -v|--version]");
println!("\n-f|--file: File to run preprocessing on.");
println!("-c|--conditon: Comma delimited list of conditon(s) to apply.");
println!("-o|--out: File to output preprocessed LoC to.");
println!("-l|--verbose: Display message to console on process.");
println!("-h|--help: Display this help information and exit.");
println!("-v|--version: Display program version and exit.");
exit(exit_code);
}
fn main() {
let cli = CliOptions::new("fm");
let program = cli.get_program();
let mut input = String::new();
let mut output = String::new();
let mut conditions = String::new();
let mut verbose = false;
if cli.get_num() > 1 {
for (i, a) in cli.get_args().iter().enumerate() {
match a.trim() {
"-h" | "--help" => display_usage(&program, 0),
"-v" | "--version" => display_version(),
"-f" | "--file" => input = cli.next_argument(i),
"-c" | "--condition" => conditions = cli.next_argument(i),
"-o" | "--out" => output = cli.next_argument(i),
"-l" | "--verbose" => verbose = true,
_ => continue,
}
}
if input.is_empty() {
display_error(&program, "No input specified");
}
if output.is_empty() {
display_error(&program, "No output specified");
}
preprocess(&input, &output, &conditions, verbose);
}
else {
display_error(&program, "No options specified");
}
} | }
if !in_pp { | random_line_split |
main.rs | /*
Full Monkey.
Generic preprocessor tool.
Copyright 2016 Sam Saint-Pettersen.
Released under the MIT/X11 License.
*/
extern crate clioptions;
extern crate regex;
use clioptions::CliOptions;
use regex::Regex;
use std::io::{BufRead, BufReader, Write};
use std::fs::File;
use std::process::exit;
fn preprocess(input: &str, output: &str, conditions: &str, verbose: bool) {
let mut loc: Vec<String> = Vec::new();
let mut preprocessed: Vec<String> = Vec::new();
let mut cond = String::new();
let mut set_conds: Vec<String> = Vec::new();
let mut prefixed: Vec<String> = Vec::new();
let mut prefixes: Vec<String> = Vec::new();
let mut in_pp = false;
let conditions = conditions.split(",");
for sc in conditions {
set_conds.push(sc.to_string());
}
let f = File::open(input).unwrap();
let file = BufReader::new(&f);
for l in file.lines() {
let l = l.unwrap();
// Process prefix...
let mut p = Regex::new("#prefix (.*) with (.*)").unwrap();
if p.is_match(&l) {
for cap in p.captures_iter(&l) {
prefixed.push(cap.at(1).unwrap().to_string());
prefixes.push(cap.at(2).unwrap().to_string());
}
continue;
}
// Process conditional (if/else/elseif)...
p = Regex::new("#[else]*[if]* (.*)").unwrap();
if p.is_match(&l) |
// Process end block...
p = Regex::new("#[fi|endif]").unwrap();
if p.is_match(&l) {
in_pp = false;
continue;
}
// Push relevant LoC to vector...
for sc in set_conds.clone() {
if in_pp && cond == sc {
preprocessed.push(l.to_string());
}
}
if!in_pp {
preprocessed.push(l.to_string());
continue;
}
}
// Do any alterations:
for line in preprocessed {
let mut fl = line;
for (i, p) in prefixed.iter().enumerate() {
let r = Regex::new(®ex::quote(&p)).unwrap();
let repl = format!("{}{}", &prefixes[i], &p);
fl = r.replace_all(&fl, &repl[..]);
}
loc.push(fl);
}
loc.push(String::new());
if verbose {
println!("Preprocessing {} --> {}", input, output);
}
let mut w = File::create(output).unwrap();
let _ = w.write_all(loc.join("\n").as_bytes());
}
fn display_version() {
println!("Full Monkey v. 0.1");
println!(r" __");
println!(r"w c(..)o (");
println!(r" \__(-) __)");
println!(r" /\ (");
println!(r" /(_)___)");
println!(r" w /|");
println!(r" | \");
println!(r" m m");
println!("\nMonkey appears courtesy of ejm97:");
println!("http://www.ascii-art.de/ascii/mno/monkey.txt");
exit(0);
}
fn display_error(program: &str, error: &str) {
println!("Error: {}.", error);
display_usage(&program, -1);
}
fn display_usage(program: &str, exit_code: i32) {
println!("\nFull Monkey.");
println!("Generic preprocessor tool.");
println!("\nCopyright 2016 Sam Saint-Pettersen.");
println!("Released under the MIT/X11 License.");
println!("\n{} -f|--file <input> [-c|--condition <condition(s)>] -o|--out <output>", program);
println!("[-l|--verbose][-h|--help | -v|--version]");
println!("\n-f|--file: File to run preprocessing on.");
println!("-c|--conditon: Comma delimited list of conditon(s) to apply.");
println!("-o|--out: File to output preprocessed LoC to.");
println!("-l|--verbose: Display message to console on process.");
println!("-h|--help: Display this help information and exit.");
println!("-v|--version: Display program version and exit.");
exit(exit_code);
}
fn main() {
let cli = CliOptions::new("fm");
let program = cli.get_program();
let mut input = String::new();
let mut output = String::new();
let mut conditions = String::new();
let mut verbose = false;
if cli.get_num() > 1 {
for (i, a) in cli.get_args().iter().enumerate() {
match a.trim() {
"-h" | "--help" => display_usage(&program, 0),
"-v" | "--version" => display_version(),
"-f" | "--file" => input = cli.next_argument(i),
"-c" | "--condition" => conditions = cli.next_argument(i),
"-o" | "--out" => output = cli.next_argument(i),
"-l" | "--verbose" => verbose = true,
_ => continue,
}
}
if input.is_empty() {
display_error(&program, "No input specified");
}
if output.is_empty() {
display_error(&program, "No output specified");
}
preprocess(&input, &output, &conditions, verbose);
}
else {
display_error(&program, "No options specified");
}
}
| {
for cap in p.captures_iter(&l) {
cond = cap.at(1).unwrap().to_string();
in_pp = true;
}
continue;
} | conditional_block |
get_room_information.rs | //! `GET /_matrix/federation/*/query/directory`
//!
//! Endpoint to query room information with a room alias.
pub mod v1 {
//! `/v1/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/server-server-api/#get_matrixfederationv1querydirectory
use ruma_common::{api::ruma_api, RoomAliasId, RoomId, ServerName};
ruma_api! {
metadata: {
description: "Get mapped room ID and resident homeservers for a given room alias.",
name: "get_room_information",
method: GET,
stable_path: "/_matrix/federation/v1/query/directory",
rate_limited: false,
authentication: ServerSignatures,
added: 1.0,
}
request: {
/// Room alias to query.
#[ruma_api(query)]
pub room_alias: &'a RoomAliasId,
}
response: {
/// Room ID mapped to queried alias.
pub room_id: Box<RoomId>,
/// An array of server names that are likely to hold the given room.
pub servers: Vec<Box<ServerName>>,
}
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room alias ID.
pub fn new(room_alias: &'a RoomAliasId) -> Self {
Self { room_alias }
}
}
impl Response {
/// Creates a new `Response` with the given room IDs and servers.
pub fn new(room_id: Box<RoomId>, servers: Vec<Box<ServerName>>) -> Self |
}
}
| {
Self { room_id, servers }
} | identifier_body |
get_room_information.rs | //! `GET /_matrix/federation/*/query/directory`
//!
//! Endpoint to query room information with a room alias.
pub mod v1 {
//! `/v1/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/server-server-api/#get_matrixfederationv1querydirectory
use ruma_common::{api::ruma_api, RoomAliasId, RoomId, ServerName};
ruma_api! {
metadata: {
description: "Get mapped room ID and resident homeservers for a given room alias.",
name: "get_room_information",
method: GET,
stable_path: "/_matrix/federation/v1/query/directory",
rate_limited: false,
authentication: ServerSignatures,
added: 1.0,
}
request: {
/// Room alias to query.
#[ruma_api(query)]
pub room_alias: &'a RoomAliasId,
}
response: {
/// Room ID mapped to queried alias.
pub room_id: Box<RoomId>,
/// An array of server names that are likely to hold the given room.
pub servers: Vec<Box<ServerName>>,
}
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room alias ID.
pub fn new(room_alias: &'a RoomAliasId) -> Self {
Self { room_alias }
}
}
impl Response {
/// Creates a new `Response` with the given room IDs and servers.
pub fn | (room_id: Box<RoomId>, servers: Vec<Box<ServerName>>) -> Self {
Self { room_id, servers }
}
}
}
| new | identifier_name |
get_room_information.rs | //! `GET /_matrix/federation/*/query/directory`
//!
//! Endpoint to query room information with a room alias.
pub mod v1 {
//! `/v1/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/server-server-api/#get_matrixfederationv1querydirectory
use ruma_common::{api::ruma_api, RoomAliasId, RoomId, ServerName};
ruma_api! {
metadata: {
description: "Get mapped room ID and resident homeservers for a given room alias.",
name: "get_room_information",
method: GET,
stable_path: "/_matrix/federation/v1/query/directory",
rate_limited: false,
authentication: ServerSignatures,
added: 1.0,
}
request: {
/// Room alias to query.
#[ruma_api(query)]
pub room_alias: &'a RoomAliasId,
}
response: {
/// Room ID mapped to queried alias.
pub room_id: Box<RoomId>,
/// An array of server names that are likely to hold the given room. | impl<'a> Request<'a> {
/// Creates a new `Request` with the given room alias ID.
pub fn new(room_alias: &'a RoomAliasId) -> Self {
Self { room_alias }
}
}
impl Response {
/// Creates a new `Response` with the given room IDs and servers.
pub fn new(room_id: Box<RoomId>, servers: Vec<Box<ServerName>>) -> Self {
Self { room_id, servers }
}
}
} | pub servers: Vec<Box<ServerName>>,
}
}
| random_line_split |
device_id.rs | #[cfg(feature = "rand")]
use super::generate_localpart;
/// A Matrix key ID.
///
/// Device identifiers in Matrix are completely opaque character sequences. This type is provided
/// simply for its semantic value.
///
/// # Example
///
/// ```
/// use ruma_common::{device_id, DeviceId};
///
/// let random_id = DeviceId::new();
/// assert_eq!(random_id.as_str().len(), 8);
///
/// let static_id = device_id!("01234567");
/// assert_eq!(static_id.as_str(), "01234567");
///
/// let ref_id: &DeviceId = "abcdefghi".into();
/// assert_eq!(ref_id.as_str(), "abcdefghi");
///
/// let owned_id: Box<DeviceId> = "ijklmnop".into();
/// assert_eq!(owned_id.as_str(), "ijklmnop");
/// ```
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DeviceId(str);
| opaque_identifier!(DeviceId);
impl DeviceId {
/// Generates a random `DeviceId`, suitable for assignment to a new device.
#[cfg(feature = "rand")]
pub fn new() -> Box<Self> {
Self::from_owned(generate_localpart(8))
}
}
#[cfg(all(test, feature = "rand"))]
mod tests {
use super::DeviceId;
#[test]
fn generate_device_id() {
assert_eq!(DeviceId::new().as_str().len(), 8);
}
#[test]
fn create_device_id_from_str() {
let ref_id: &DeviceId = "abcdefgh".into();
assert_eq!(ref_id.as_str(), "abcdefgh");
}
#[test]
fn create_boxed_device_id_from_str() {
let box_id: Box<DeviceId> = "12345678".into();
assert_eq!(box_id.as_str(), "12345678");
}
#[test]
fn create_device_id_from_box() {
let box_str: Box<str> = "ijklmnop".into();
let device_id: Box<DeviceId> = DeviceId::from_owned(box_str);
assert_eq!(device_id.as_str(), "ijklmnop");
}
} | random_line_split |
|
device_id.rs | #[cfg(feature = "rand")]
use super::generate_localpart;
/// A Matrix key ID.
///
/// Device identifiers in Matrix are completely opaque character sequences. This type is provided
/// simply for its semantic value.
///
/// # Example
///
/// ```
/// use ruma_common::{device_id, DeviceId};
///
/// let random_id = DeviceId::new();
/// assert_eq!(random_id.as_str().len(), 8);
///
/// let static_id = device_id!("01234567");
/// assert_eq!(static_id.as_str(), "01234567");
///
/// let ref_id: &DeviceId = "abcdefghi".into();
/// assert_eq!(ref_id.as_str(), "abcdefghi");
///
/// let owned_id: Box<DeviceId> = "ijklmnop".into();
/// assert_eq!(owned_id.as_str(), "ijklmnop");
/// ```
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DeviceId(str);
opaque_identifier!(DeviceId);
impl DeviceId {
/// Generates a random `DeviceId`, suitable for assignment to a new device.
#[cfg(feature = "rand")]
pub fn | () -> Box<Self> {
Self::from_owned(generate_localpart(8))
}
}
#[cfg(all(test, feature = "rand"))]
mod tests {
use super::DeviceId;
#[test]
fn generate_device_id() {
assert_eq!(DeviceId::new().as_str().len(), 8);
}
#[test]
fn create_device_id_from_str() {
let ref_id: &DeviceId = "abcdefgh".into();
assert_eq!(ref_id.as_str(), "abcdefgh");
}
#[test]
fn create_boxed_device_id_from_str() {
let box_id: Box<DeviceId> = "12345678".into();
assert_eq!(box_id.as_str(), "12345678");
}
#[test]
fn create_device_id_from_box() {
let box_str: Box<str> = "ijklmnop".into();
let device_id: Box<DeviceId> = DeviceId::from_owned(box_str);
assert_eq!(device_id.as_str(), "ijklmnop");
}
}
| new | identifier_name |
hud.rs | use super::wad_system::WadSystem;
use engine::{
ControlFlow, DependenciesFrom, Gesture, InfallibleSystem, Input, Scancode, TextId,
TextRenderer, Window,
};
use math::prelude::*;
use math::Pnt2f;
pub struct Bindings {
pub quit: Gesture,
pub next_level: Gesture,
pub previous_level: Gesture,
pub toggle_mouse: Gesture,
pub toggle_help: Gesture,
}
impl Default for Bindings {
fn default() -> Self {
Bindings {
quit: Gesture::AnyOf(vec![
Gesture::QuitTrigger,
Gesture::KeyTrigger(Scancode::Escape),
]),
next_level: Gesture::AllOf(vec![
Gesture::KeyHold(Scancode::LControl),
Gesture::KeyTrigger(Scancode::N),
]),
previous_level: Gesture::AllOf(vec![
Gesture::KeyHold(Scancode::LControl),
Gesture::KeyTrigger(Scancode::P),
]),
toggle_mouse: Gesture::KeyTrigger(Scancode::Grave),
toggle_help: Gesture::KeyTrigger(Scancode::H),
}
}
}
#[derive(DependenciesFrom)]
pub struct Dependencies<'context> {
bindings: &'context Bindings,
window: &'context Window,
input: &'context mut Input,
text: &'context mut TextRenderer,
control_flow: &'context mut ControlFlow,
wad: &'context mut WadSystem,
}
pub struct Hud {
mouse_grabbed: bool,
current_help: HelpState,
prompt_text: TextId,
help_text: TextId,
}
impl<'context> InfallibleSystem<'context> for Hud {
type Dependencies = Dependencies<'context>;
fn debug_name() -> &'static str {
"hud"
}
fn create(deps: Dependencies) -> Self {
deps.input.set_mouse_enabled(true);
deps.input.set_cursor_grabbed(true);
let prompt_text = deps
.text
.insert(deps.window, PROMPT_TEXT, Pnt2f::origin(), HELP_PADDING);
let help_text = deps
.text
.insert(deps.window, HELP_TEXT, Pnt2f::origin(), HELP_PADDING);
deps.text[help_text].set_visible(false);
Hud {
prompt_text,
help_text,
mouse_grabbed: true,
current_help: HelpState::Prompt,
}
}
fn update(&mut self, deps: Dependencies) {
let Dependencies {
input,
text,
control_flow,
bindings,
..
} = deps;
if input.poll_gesture(&bindings.quit) {
control_flow.quit_requested = true
}
if input.poll_gesture(&bindings.toggle_mouse) {
self.mouse_grabbed =!self.mouse_grabbed;
input.set_mouse_enabled(self.mouse_grabbed);
input.set_cursor_grabbed(self.mouse_grabbed);
}
if input.poll_gesture(&bindings.toggle_help) {
self.current_help = match self.current_help {
HelpState::Prompt => {
text[self.prompt_text].set_visible(false);
text[self.help_text].set_visible(true);
HelpState::Shown
}
HelpState::Shown => {
text[self.help_text].set_visible(false);
HelpState::Hidden
}
HelpState::Hidden => {
text[self.help_text].set_visible(true);
HelpState::Shown
}
};
}
if input.poll_gesture(&bindings.next_level) {
let index = deps.wad.level_index();
deps.wad.change_level(index + 1);
} else if input.poll_gesture(&bindings.previous_level) {
let index = deps.wad.level_index();
if index > 0 |
}
}
fn teardown(&mut self, deps: Dependencies) {
deps.text.remove(self.help_text);
deps.text.remove(self.prompt_text);
}
}
enum HelpState {
Prompt,
Shown,
Hidden,
}
const HELP_PADDING: u32 = 6;
const PROMPT_TEXT: &str = "WASD and mouse, 'E' to push/use, LB to shoot or 'h' for help.";
const HELP_TEXT: &str = r"Use WASD to move and the mouse or arrow keys to aim.
Other keys:
ESC - to quit
SPACEBAR - jump
E - push/interact/use
Left Click - shoot (only effect is to trigger gun-activated things)
` - to toggle mouse grab (backtick)
f - to toggle fly mode
c - to toggle clipping (wall collisions)
Ctrl-N - to change to next level (though using the exit will also do this!)
Ctrl-P - to change to previous level
h - toggle this help message";
| {
deps.wad.change_level(index - 1);
} | conditional_block |
hud.rs | use super::wad_system::WadSystem;
use engine::{
ControlFlow, DependenciesFrom, Gesture, InfallibleSystem, Input, Scancode, TextId,
TextRenderer, Window,
};
use math::prelude::*;
use math::Pnt2f;
pub struct Bindings {
pub quit: Gesture,
pub next_level: Gesture,
pub previous_level: Gesture,
pub toggle_mouse: Gesture,
pub toggle_help: Gesture,
}
impl Default for Bindings {
fn default() -> Self {
Bindings {
quit: Gesture::AnyOf(vec![
Gesture::QuitTrigger,
Gesture::KeyTrigger(Scancode::Escape),
]),
next_level: Gesture::AllOf(vec![
Gesture::KeyHold(Scancode::LControl),
Gesture::KeyTrigger(Scancode::N),
]),
previous_level: Gesture::AllOf(vec![
Gesture::KeyHold(Scancode::LControl),
Gesture::KeyTrigger(Scancode::P),
]),
toggle_mouse: Gesture::KeyTrigger(Scancode::Grave),
toggle_help: Gesture::KeyTrigger(Scancode::H),
}
}
}
#[derive(DependenciesFrom)]
pub struct Dependencies<'context> {
bindings: &'context Bindings,
window: &'context Window,
input: &'context mut Input,
text: &'context mut TextRenderer,
control_flow: &'context mut ControlFlow,
wad: &'context mut WadSystem,
}
pub struct Hud {
mouse_grabbed: bool,
current_help: HelpState,
prompt_text: TextId,
help_text: TextId,
}
impl<'context> InfallibleSystem<'context> for Hud {
type Dependencies = Dependencies<'context>;
fn debug_name() -> &'static str {
"hud"
}
fn create(deps: Dependencies) -> Self {
deps.input.set_mouse_enabled(true);
deps.input.set_cursor_grabbed(true);
let prompt_text = deps
.text
.insert(deps.window, PROMPT_TEXT, Pnt2f::origin(), HELP_PADDING);
let help_text = deps
.text
.insert(deps.window, HELP_TEXT, Pnt2f::origin(), HELP_PADDING);
deps.text[help_text].set_visible(false);
Hud {
prompt_text,
help_text,
mouse_grabbed: true,
current_help: HelpState::Prompt,
}
}
fn update(&mut self, deps: Dependencies) {
let Dependencies {
input,
text,
control_flow,
bindings,
..
} = deps;
if input.poll_gesture(&bindings.quit) {
control_flow.quit_requested = true
}
if input.poll_gesture(&bindings.toggle_mouse) {
self.mouse_grabbed =!self.mouse_grabbed;
input.set_mouse_enabled(self.mouse_grabbed);
input.set_cursor_grabbed(self.mouse_grabbed);
}
if input.poll_gesture(&bindings.toggle_help) {
self.current_help = match self.current_help {
HelpState::Prompt => {
text[self.prompt_text].set_visible(false);
text[self.help_text].set_visible(true);
HelpState::Shown
}
HelpState::Shown => {
text[self.help_text].set_visible(false);
HelpState::Hidden
}
HelpState::Hidden => {
text[self.help_text].set_visible(true);
HelpState::Shown
}
};
}
if input.poll_gesture(&bindings.next_level) {
let index = deps.wad.level_index();
deps.wad.change_level(index + 1);
} else if input.poll_gesture(&bindings.previous_level) {
let index = deps.wad.level_index();
if index > 0 {
deps.wad.change_level(index - 1);
}
}
}
fn | (&mut self, deps: Dependencies) {
deps.text.remove(self.help_text);
deps.text.remove(self.prompt_text);
}
}
enum HelpState {
Prompt,
Shown,
Hidden,
}
const HELP_PADDING: u32 = 6;
const PROMPT_TEXT: &str = "WASD and mouse, 'E' to push/use, LB to shoot or 'h' for help.";
const HELP_TEXT: &str = r"Use WASD to move and the mouse or arrow keys to aim.
Other keys:
ESC - to quit
SPACEBAR - jump
E - push/interact/use
Left Click - shoot (only effect is to trigger gun-activated things)
` - to toggle mouse grab (backtick)
f - to toggle fly mode
c - to toggle clipping (wall collisions)
Ctrl-N - to change to next level (though using the exit will also do this!)
Ctrl-P - to change to previous level
h - toggle this help message";
| teardown | identifier_name |
hud.rs | use super::wad_system::WadSystem;
use engine::{
ControlFlow, DependenciesFrom, Gesture, InfallibleSystem, Input, Scancode, TextId,
TextRenderer, Window,
};
use math::prelude::*;
use math::Pnt2f;
pub struct Bindings {
pub quit: Gesture,
pub next_level: Gesture,
pub previous_level: Gesture,
pub toggle_mouse: Gesture,
pub toggle_help: Gesture,
}
impl Default for Bindings {
fn default() -> Self {
Bindings {
quit: Gesture::AnyOf(vec![
Gesture::QuitTrigger,
Gesture::KeyTrigger(Scancode::Escape),
]),
next_level: Gesture::AllOf(vec![
Gesture::KeyHold(Scancode::LControl),
Gesture::KeyTrigger(Scancode::N),
]),
previous_level: Gesture::AllOf(vec![
Gesture::KeyHold(Scancode::LControl),
Gesture::KeyTrigger(Scancode::P),
]),
toggle_mouse: Gesture::KeyTrigger(Scancode::Grave),
toggle_help: Gesture::KeyTrigger(Scancode::H),
}
}
}
#[derive(DependenciesFrom)]
pub struct Dependencies<'context> {
bindings: &'context Bindings,
window: &'context Window,
input: &'context mut Input,
text: &'context mut TextRenderer,
control_flow: &'context mut ControlFlow,
wad: &'context mut WadSystem,
}
pub struct Hud {
mouse_grabbed: bool,
current_help: HelpState,
prompt_text: TextId,
help_text: TextId,
}
impl<'context> InfallibleSystem<'context> for Hud {
type Dependencies = Dependencies<'context>;
fn debug_name() -> &'static str {
"hud"
}
fn create(deps: Dependencies) -> Self {
deps.input.set_mouse_enabled(true);
deps.input.set_cursor_grabbed(true);
let prompt_text = deps
.text
.insert(deps.window, PROMPT_TEXT, Pnt2f::origin(), HELP_PADDING);
let help_text = deps
.text
.insert(deps.window, HELP_TEXT, Pnt2f::origin(), HELP_PADDING);
deps.text[help_text].set_visible(false);
Hud {
prompt_text,
help_text,
mouse_grabbed: true,
current_help: HelpState::Prompt,
}
}
fn update(&mut self, deps: Dependencies) {
let Dependencies {
input,
text,
control_flow,
bindings,
..
} = deps;
if input.poll_gesture(&bindings.quit) {
control_flow.quit_requested = true
}
if input.poll_gesture(&bindings.toggle_mouse) {
self.mouse_grabbed =!self.mouse_grabbed;
input.set_mouse_enabled(self.mouse_grabbed);
input.set_cursor_grabbed(self.mouse_grabbed);
}
if input.poll_gesture(&bindings.toggle_help) {
self.current_help = match self.current_help {
HelpState::Prompt => { | text[self.prompt_text].set_visible(false);
text[self.help_text].set_visible(true);
HelpState::Shown
}
HelpState::Shown => {
text[self.help_text].set_visible(false);
HelpState::Hidden
}
HelpState::Hidden => {
text[self.help_text].set_visible(true);
HelpState::Shown
}
};
}
if input.poll_gesture(&bindings.next_level) {
let index = deps.wad.level_index();
deps.wad.change_level(index + 1);
} else if input.poll_gesture(&bindings.previous_level) {
let index = deps.wad.level_index();
if index > 0 {
deps.wad.change_level(index - 1);
}
}
}
fn teardown(&mut self, deps: Dependencies) {
deps.text.remove(self.help_text);
deps.text.remove(self.prompt_text);
}
}
enum HelpState {
Prompt,
Shown,
Hidden,
}
const HELP_PADDING: u32 = 6;
const PROMPT_TEXT: &str = "WASD and mouse, 'E' to push/use, LB to shoot or 'h' for help.";
const HELP_TEXT: &str = r"Use WASD to move and the mouse or arrow keys to aim.
Other keys:
ESC - to quit
SPACEBAR - jump
E - push/interact/use
Left Click - shoot (only effect is to trigger gun-activated things)
` - to toggle mouse grab (backtick)
f - to toggle fly mode
c - to toggle clipping (wall collisions)
Ctrl-N - to change to next level (though using the exit will also do this!)
Ctrl-P - to change to previous level
h - toggle this help message"; | random_line_split |
|
custom_tests.rs | extern crate rusoto_mock;
use crate::generated::{LexRuntime, LexRuntimeClient, PostTextRequest, PostTextResponse}; | #[tokio::test]
async fn test_post_text_resposnse_serialization() {
let mock_resp_body = r#"{
"dialogState": "ElicitSlot",
"intentName": "BookCar",
"message": "In what city do you need to rent a car?",
"messageFormat": "PlainText",
"responseCard": null,
"sessionAttributes": {},
"slotToElicit": "PickUpCity",
"slots": {
"CarType": null,
"PickUpCity": "Boston"
}
}"#;
let mock_request = MockRequestDispatcher::with_status(200).with_body(mock_resp_body);
let lex_client =
LexRuntimeClient::new_with(mock_request, MockCredentialsProvider, Region::UsEast1);
let post_text_req = PostTextRequest {
input_text: "Book a car".to_owned(),
user_id: "rs".to_owned(),
..Default::default()
};
let mut slots = HashMap::new();
slots.insert("CarType".to_owned(), None);
slots.insert("PickUpCity".to_owned(), Some("Boston".to_owned()));
let expected = PostTextResponse {
active_contexts: None,
alternative_intents: None,
bot_version: None,
dialog_state: Some("ElicitSlot".to_owned()),
intent_name: Some("BookCar".to_owned()),
message: Some("In what city do you need to rent a car?".to_owned()),
message_format: Some("PlainText".to_owned()),
nlu_intent_confidence: None,
slot_to_elicit: Some("PickUpCity".to_owned()),
slots: Some(slots),
response_card: None,
session_attributes: Some(HashMap::new()),
sentiment_response: None,
session_id: None,
};
let result: PostTextResponse = lex_client.post_text(post_text_req).await.unwrap();
assert_eq!(result, expected);
} | use rusoto_core::Region;
use std::collections::HashMap;
use self::rusoto_mock::*;
| random_line_split |
custom_tests.rs | extern crate rusoto_mock;
use crate::generated::{LexRuntime, LexRuntimeClient, PostTextRequest, PostTextResponse};
use rusoto_core::Region;
use std::collections::HashMap;
use self::rusoto_mock::*;
#[tokio::test]
async fn | () {
let mock_resp_body = r#"{
"dialogState": "ElicitSlot",
"intentName": "BookCar",
"message": "In what city do you need to rent a car?",
"messageFormat": "PlainText",
"responseCard": null,
"sessionAttributes": {},
"slotToElicit": "PickUpCity",
"slots": {
"CarType": null,
"PickUpCity": "Boston"
}
}"#;
let mock_request = MockRequestDispatcher::with_status(200).with_body(mock_resp_body);
let lex_client =
LexRuntimeClient::new_with(mock_request, MockCredentialsProvider, Region::UsEast1);
let post_text_req = PostTextRequest {
input_text: "Book a car".to_owned(),
user_id: "rs".to_owned(),
..Default::default()
};
let mut slots = HashMap::new();
slots.insert("CarType".to_owned(), None);
slots.insert("PickUpCity".to_owned(), Some("Boston".to_owned()));
let expected = PostTextResponse {
active_contexts: None,
alternative_intents: None,
bot_version: None,
dialog_state: Some("ElicitSlot".to_owned()),
intent_name: Some("BookCar".to_owned()),
message: Some("In what city do you need to rent a car?".to_owned()),
message_format: Some("PlainText".to_owned()),
nlu_intent_confidence: None,
slot_to_elicit: Some("PickUpCity".to_owned()),
slots: Some(slots),
response_card: None,
session_attributes: Some(HashMap::new()),
sentiment_response: None,
session_id: None,
};
let result: PostTextResponse = lex_client.post_text(post_text_req).await.unwrap();
assert_eq!(result, expected);
}
| test_post_text_resposnse_serialization | identifier_name |
custom_tests.rs | extern crate rusoto_mock;
use crate::generated::{LexRuntime, LexRuntimeClient, PostTextRequest, PostTextResponse};
use rusoto_core::Region;
use std::collections::HashMap;
use self::rusoto_mock::*;
#[tokio::test]
async fn test_post_text_resposnse_serialization() | input_text: "Book a car".to_owned(),
user_id: "rs".to_owned(),
..Default::default()
};
let mut slots = HashMap::new();
slots.insert("CarType".to_owned(), None);
slots.insert("PickUpCity".to_owned(), Some("Boston".to_owned()));
let expected = PostTextResponse {
active_contexts: None,
alternative_intents: None,
bot_version: None,
dialog_state: Some("ElicitSlot".to_owned()),
intent_name: Some("BookCar".to_owned()),
message: Some("In what city do you need to rent a car?".to_owned()),
message_format: Some("PlainText".to_owned()),
nlu_intent_confidence: None,
slot_to_elicit: Some("PickUpCity".to_owned()),
slots: Some(slots),
response_card: None,
session_attributes: Some(HashMap::new()),
sentiment_response: None,
session_id: None,
};
let result: PostTextResponse = lex_client.post_text(post_text_req).await.unwrap();
assert_eq!(result, expected);
}
| {
let mock_resp_body = r#"{
"dialogState": "ElicitSlot",
"intentName": "BookCar",
"message": "In what city do you need to rent a car?",
"messageFormat": "PlainText",
"responseCard": null,
"sessionAttributes": {},
"slotToElicit": "PickUpCity",
"slots": {
"CarType": null,
"PickUpCity": "Boston"
}
}"#;
let mock_request = MockRequestDispatcher::with_status(200).with_body(mock_resp_body);
let lex_client =
LexRuntimeClient::new_with(mock_request, MockCredentialsProvider, Region::UsEast1);
let post_text_req = PostTextRequest { | identifier_body |
roundsd.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn roundsd_1() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM0)), operand3: Some(Literal8(73)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 192, 73], OperandSize::Dword)
}
fn roundsd_2() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectDisplaced(EAX, 455015242, Some(OperandSize::Qword), None)), operand3: Some(Literal8(66)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 176, 74, 251, 30, 27, 66], OperandSize::Dword)
}
fn roundsd_3() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(21)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 249, 21], OperandSize::Qword)
} |
fn roundsd_4() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectScaledIndexedDisplaced(RDX, RCX, Eight, 1515642711, Some(OperandSize::Qword), None)), operand3: Some(Literal8(102)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 180, 202, 87, 223, 86, 90, 102], OperandSize::Qword)
} | random_line_split |
|
roundsd.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn roundsd_1() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM0)), operand3: Some(Literal8(73)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 192, 73], OperandSize::Dword)
}
fn | () {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectDisplaced(EAX, 455015242, Some(OperandSize::Qword), None)), operand3: Some(Literal8(66)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 176, 74, 251, 30, 27, 66], OperandSize::Dword)
}
fn roundsd_3() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(21)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 249, 21], OperandSize::Qword)
}
fn roundsd_4() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectScaledIndexedDisplaced(RDX, RCX, Eight, 1515642711, Some(OperandSize::Qword), None)), operand3: Some(Literal8(102)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 180, 202, 87, 223, 86, 90, 102], OperandSize::Qword)
}
| roundsd_2 | identifier_name |
roundsd.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn roundsd_1() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM0)), operand3: Some(Literal8(73)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 192, 73], OperandSize::Dword)
}
fn roundsd_2() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectDisplaced(EAX, 455015242, Some(OperandSize::Qword), None)), operand3: Some(Literal8(66)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 176, 74, 251, 30, 27, 66], OperandSize::Dword)
}
fn roundsd_3() {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(21)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 249, 21], OperandSize::Qword)
}
fn roundsd_4() | {
run_test(&Instruction { mnemonic: Mnemonic::ROUNDSD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectScaledIndexedDisplaced(RDX, RCX, Eight, 1515642711, Some(OperandSize::Qword), None)), operand3: Some(Literal8(102)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 11, 180, 202, 87, 223, 86, 90, 102], OperandSize::Qword)
} | identifier_body |
|
process.rs | use std::io::process::{Command,ProcessOutput};
fn main() | let s = String::from_utf8_lossy(err.as_slice());
print!("rustc failed and stderr was:\n{}", s);
}
},
}
}
| {
// Initial command `rustc`
let mut cmd = Command::new("rustc");
// append the "--version" flag to the command
cmd.arg("--version");
// The `output` method will spawn `rustc --version`, wait until the process
// finishes and return the output of the process
match cmd.output() {
Err(why) => panic!("couldn't spawn rustc: {}", why.desc),
// Destructure `ProcessOutput`
Ok(ProcessOutput { error: err, output: out, status: exit }) => {
// Check if the process succeeded, i.e. the exit code was 0
if exit.success() {
// `out` has type `Vec<u8>`, convert it to a UTF-8 `$str`
let s = String::from_utf8_lossy(out.as_slice());
print!("rustc succeeded and stdout was:\n{}", s);
} else {
// `err` also has type `Vec<u8>` | identifier_body |
process.rs | use std::io::process::{Command,ProcessOutput};
fn main() {
// Initial command `rustc`
let mut cmd = Command::new("rustc");
// append the "--version" flag to the command
cmd.arg("--version");
| Ok(ProcessOutput { error: err, output: out, status: exit }) => {
// Check if the process succeeded, i.e. the exit code was 0
if exit.success() {
// `out` has type `Vec<u8>`, convert it to a UTF-8 `$str`
let s = String::from_utf8_lossy(out.as_slice());
print!("rustc succeeded and stdout was:\n{}", s);
} else {
// `err` also has type `Vec<u8>`
let s = String::from_utf8_lossy(err.as_slice());
print!("rustc failed and stderr was:\n{}", s);
}
},
}
} | // The `output` method will spawn `rustc --version`, wait until the process
// finishes and return the output of the process
match cmd.output() {
Err(why) => panic!("couldn't spawn rustc: {}", why.desc),
// Destructure `ProcessOutput` | random_line_split |
process.rs | use std::io::process::{Command,ProcessOutput};
fn | () {
// Initial command `rustc`
let mut cmd = Command::new("rustc");
// append the "--version" flag to the command
cmd.arg("--version");
// The `output` method will spawn `rustc --version`, wait until the process
// finishes and return the output of the process
match cmd.output() {
Err(why) => panic!("couldn't spawn rustc: {}", why.desc),
// Destructure `ProcessOutput`
Ok(ProcessOutput { error: err, output: out, status: exit }) => {
// Check if the process succeeded, i.e. the exit code was 0
if exit.success() {
// `out` has type `Vec<u8>`, convert it to a UTF-8 `$str`
let s = String::from_utf8_lossy(out.as_slice());
print!("rustc succeeded and stdout was:\n{}", s);
} else {
// `err` also has type `Vec<u8>`
let s = String::from_utf8_lossy(err.as_slice());
print!("rustc failed and stderr was:\n{}", s);
}
},
}
}
| main | identifier_name |
gcs.rs | _acl(attrs.get("predefined_acl").unwrap_or(def))
.or_invalid_input("invalid predefined_acl")?;
let storage_class = parse_storage_class(&none_to_empty(bucket.storage_class.clone()))
.or_invalid_input("invalid storage_class")?;
let credentials_blob_opt = StringNonEmpty::opt(
attrs
.get("credentials_blob")
.unwrap_or(&"".to_string())
.to_string(),
);
let svc_info = if let Some(cred) = credentials_blob_opt {
Some(deserialize_service_account_info(cred)?)
} else {
None
};
Ok(Config {
bucket,
predefined_acl,
svc_info,
storage_class,
})
}
pub fn from_input(input: InputConfig) -> io::Result<Config> {
let endpoint = StringNonEmpty::opt(input.endpoint);
let bucket = BucketConf {
endpoint,
bucket: StringNonEmpty::required_field(input.bucket, "bucket")?,
prefix: StringNonEmpty::opt(input.prefix),
storage_class: StringNonEmpty::opt(input.storage_class),
region: None,
};
let predefined_acl = parse_predefined_acl(&input.predefined_acl)
.or_invalid_input("invalid predefined_acl")?;
let storage_class = parse_storage_class(&none_to_empty(bucket.storage_class.clone()))
.or_invalid_input("invalid storage_class")?;
let svc_info = if let Some(cred) = StringNonEmpty::opt(input.credentials_blob) {
Some(deserialize_service_account_info(cred)?)
} else {
None
};
Ok(Config {
bucket,
predefined_acl,
svc_info,
storage_class,
})
}
}
fn deserialize_service_account_info(
cred: StringNonEmpty,
) -> std::result::Result<ServiceAccountInfo, RequestError> {
ServiceAccountInfo::deserialize(cred.to_string())
.map_err(|e| RequestError::OAuth(e, "deserialize ServiceAccountInfo".to_string()))
}
impl BlobConfig for Config {
fn name(&self) -> &'static str {
STORAGE_NAME
}
fn url(&self) -> io::Result<url::Url> {
self.bucket.url("gcs").map_err(|s| {
io::Error::new(
io::ErrorKind::Other,
format!("error creating bucket url: {}", s),
)
})
}
}
// GCS compatible storage
#[derive(Clone)]
pub struct GCSStorage {
config: Config,
svc_access: Option<Arc<ServiceAccountAccess>>,
client: Client<HttpsConnector<HttpConnector>, Body>,
}
trait ResultExt {
type Ok;
// Maps the error of this result as an `std::io::Error` with `Other` error
// kind.
fn or_io_error<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
// Maps the error of this result as an `std::io::Error` with `InvalidInput`
// error kind.
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
}
impl<T, E: Display> ResultExt for Result<T, E> {
type Ok = T;
fn or_io_error<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}: {}", msg, e)))
}
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, format!("{}: {}", msg, e)))
}
}
enum RequestError {
Hyper(hyper::Error, String),
OAuth(tame_oauth::Error, String),
Gcs(tame_gcs::Error),
InvalidEndpoint(http::uri::InvalidUri),
}
impl From<http::uri::InvalidUri> for RequestError {
fn from(err: http::uri::InvalidUri) -> Self {
Self::InvalidEndpoint(err)
}
}
fn status_code_error(code: StatusCode, msg: String) -> RequestError {
RequestError::OAuth(tame_oauth::Error::HttpStatus(code), msg)
}
impl From<RequestError> for io::Error {
fn from(err: RequestError) -> Self {
match err {
RequestError::Hyper(e, msg) => {
Self::new(io::ErrorKind::InvalidInput, format!("HTTP {}: {}", msg, e))
}
RequestError::OAuth(tame_oauth::Error::Io(e), _) => e,
RequestError::OAuth(tame_oauth::Error::HttpStatus(sc), msg) => {
let fmt = format!("GCS OAuth: {}: {}", msg, sc);
match sc.as_u16() {
401 | 403 => Self::new(io::ErrorKind::PermissionDenied, fmt),
404 => Self::new(io::ErrorKind::NotFound, fmt),
_ if sc.is_server_error() => Self::new(io::ErrorKind::Interrupted, fmt),
_ => Self::new(io::ErrorKind::InvalidInput, fmt),
}
}
RequestError::OAuth(tame_oauth::Error::AuthError(e), msg) => Self::new(
io::ErrorKind::PermissionDenied,
format!("authorization failed: {}: {}", msg, e),
),
RequestError::OAuth(e, msg) => Self::new(
io::ErrorKind::InvalidInput,
format!("oauth failed: {}: {}", msg, e),
),
RequestError::Gcs(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS request: {}", e),
),
RequestError::InvalidEndpoint(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS endpoint URI: {}", e),
),
}
}
}
impl RetryError for RequestError {
fn is_retryable(&self) -> bool {
match self {
// FIXME: Inspect the error source?
Self::Hyper(e, _) => {
e.is_closed()
|| e.is_connect()
|| e.is_incomplete_message()
|| e.is_body_write_aborted()
}
// See https://cloud.google.com/storage/docs/exponential-backoff.
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::TOO_MANY_REQUESTS), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::REQUEST_TIMEOUT), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(status), _) => status.is_server_error(),
// Consider everything else not retryable.
_ => false,
}
}
}
impl GCSStorage {
pub fn from_input(input: InputConfig) -> io::Result<Self> {
Self::new(Config::from_input(input)?)
}
pub fn from_cloud_dynamic(cloud_dynamic: &CloudDynamic) -> io::Result<Self> {
Self::new(Config::from_cloud_dynamic(cloud_dynamic)?)
}
/// Create a new GCS storage for the given config.
pub fn new(config: Config) -> io::Result<GCSStorage> {
let svc_access = if let Some(si) = &config.svc_info {
Some(
ServiceAccountAccess::new(si.clone())
.or_invalid_input("invalid credentials_blob")?,
)
} else {
None
};
let client = Client::builder().build(HttpsConnector::new());
Ok(GCSStorage {
config,
svc_access: svc_access.map(Arc::new),
client,
})
}
fn maybe_prefix_key(&self, key: &str) -> String {
if let Some(prefix) = &self.config.bucket.prefix {
return format!("{}/{}", prefix, key);
}
key.to_owned()
}
async fn set_auth(
&self,
req: &mut Request<Body>,
scope: tame_gcs::Scopes,
svc_access: Arc<ServiceAccountAccess>,
) -> Result<(), RequestError> {
let token_or_request = svc_access
.get_token(&[scope])
.map_err(|e| RequestError::OAuth(e, "get_token".to_string()))?;
let token = match token_or_request {
TokenOrRequest::Token(token) => token,
TokenOrRequest::Request {
request,
scope_hash,
..
} => {
let res = self
.client
.request(request.map(From::from))
.await
.map_err(|e| RequestError::Hyper(e, "set auth request".to_owned()))?;
if!res.status().is_success() |
let (parts, body) = res.into_parts();
let body = hyper::body::to_bytes(body)
.await
.map_err(|e| RequestError::Hyper(e, "set auth body".to_owned()))?;
svc_access
.parse_token_response(scope_hash, Response::from_parts(parts, body))
.map_err(|e| RequestError::OAuth(e, "set auth parse token".to_string()))?
}
};
req.headers_mut().insert(
http::header::AUTHORIZATION,
token
.try_into()
.map_err(|e| RequestError::OAuth(e, "set auth add auth header".to_string()))?,
);
Ok(())
}
async fn make_request(
&self,
mut req: Request<Body>,
scope: tame_gcs::Scopes,
) -> Result<Response<Body>, RequestError> {
// replace the hard-coded GCS endpoint by the custom one.
if let Some(endpoint) = &self.config.bucket.endpoint {
let uri = req.uri().to_string();
let new_url_opt = change_host(endpoint, &uri);
if let Some(new_url) = new_url_opt {
*req.uri_mut() = new_url.parse()?;
}
}
if let Some(svc_access) = &self.svc_access {
self.set_auth(&mut req, scope, svc_access.clone()).await?;
}
let uri = req.uri().to_string();
let res = self
.client
.request(req)
.await
.map_err(|e| RequestError::Hyper(e, uri.clone()))?;
if!res.status().is_success() {
return Err(status_code_error(res.status(), uri));
}
Ok(res)
}
fn error_to_async_read<E>(kind: io::ErrorKind, e: E) -> Box<dyn AsyncRead + Unpin>
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Box::new(error_stream(io::Error::new(kind, e)).into_async_read())
}
}
fn change_host(host: &StringNonEmpty, url: &str) -> Option<String> {
let new_host = (|| {
for hardcoded in HARDCODED_ENDPOINTS_SUFFIX {
if let Some(res) = host.strip_suffix(hardcoded) {
return StringNonEmpty::opt(res.to_owned()).unwrap();
}
}
host.to_owned()
})();
if let Some(res) = url.strip_prefix(GOOGLE_APIS) {
return Some([new_host.trim_end_matches('/'), res].concat());
}
None
}
// Convert manually since they don't implement FromStr.
fn parse_storage_class(sc: &str) -> Result<Option<StorageClass>, &str> {
Ok(Some(match sc {
"" => return Ok(None),
"STANDARD" => StorageClass::Standard,
"NEARLINE" => StorageClass::Nearline,
"COLDLINE" => StorageClass::Coldline,
"DURABLE_REDUCED_AVAILABILITY" => StorageClass::DurableReducedAvailability,
"REGIONAL" => StorageClass::Regional,
"MULTI_REGIONAL" => StorageClass::MultiRegional,
_ => return Err(sc),
}))
}
fn parse_predefined_acl(acl: &str) -> Result<Option<PredefinedAcl>, &str> {
Ok(Some(match acl {
"" => return Ok(None),
"authenticatedRead" => PredefinedAcl::AuthenticatedRead,
"bucketOwnerFullControl" => PredefinedAcl::BucketOwnerFullControl,
"bucketOwnerRead" => PredefinedAcl::BucketOwnerRead,
"private" => PredefinedAcl::Private,
"projectPrivate" => PredefinedAcl::ProjectPrivate,
"publicRead" => PredefinedAcl::PublicRead,
_ => return Err(acl),
}))
}
const STORAGE_NAME: &str = "gcs";
impl BlobStorage for GCSStorage {
fn config(&self) -> Box<dyn BlobConfig> {
Box::new(self.config.clone()) as Box<dyn BlobConfig>
}
fn put(
&self,
name: &str,
mut reader: Box<dyn AsyncRead + Send + Unpin>,
content_length: u64,
) -> io::Result<()> {
if content_length == 0 {
// It is probably better to just write the empty file
// However, currently going forward results in a body write aborted error
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"no content to write",
));
}
use std::convert::TryFrom;
let key = self.maybe_prefix_key(name);
debug!("save file to GCS storage"; "key" => %key);
let bucket = BucketName::try_from(self.config.bucket.bucket.to_string())
.or_invalid_input(format_args!("invalid bucket {}", self.config.bucket.bucket))?;
let metadata = Metadata {
name: Some(key),
storage_class: self.config.storage_class,
..Default::default()
};
block_on_external_io(async move {
// FIXME: Switch to upload() API so we don't need to read the entire data into memory
// in order to retry.
let mut data = Vec::with_capacity(content_length as usize);
reader.read_to_end(&mut data).await?;
retry(|| async {
let data = Cursor::new(data.clone());
let req = Object::insert_multipart(
&bucket,
data,
content_length,
&metadata,
Some(InsertObjectOptional {
predefined_acl: self.config.predefined_acl,
..Default::default()
}),
)
.map_err(RequestError::Gcs)?
.map(|reader| Body::wrap_stream(AsyncReadAsSyncStreamOfBytes::new(reader)));
self.make_request(req, tame_gcs::Scopes::ReadWrite).await
})
.await?;
Ok::<_, io::Error>(())
})?;
Ok(())
}
fn get(&self, name: &str) -> Box<dyn AsyncRead + Unpin + '_> {
let bucket = self.config.bucket.bucket.to_string();
let name = self.maybe_prefix_key(name);
debug!("read file from GCS storage"; "key" => %name);
let oid = match ObjectId::new(bucket, name) {
Ok(oid) => oid,
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::InvalidInput, e),
};
let request = match Object::download(&oid, None /*optional*/) {
Ok(request) => request.map(|_: io::Empty| Body::empty()),
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::Other, e),
};
Box::new(
self.make_request(request, tame_gcs::Scopes::ReadOnly)
.and_then(|response| async {
if response.status().is_success() {
Ok(response.into_body().map_err(|e| {
io::Error::new(
io::ErrorKind::Other,
format!("download from GCS error: {}", e),
)
}))
} else {
Err(status_code_error(
response.status(),
"bucket read".to_string(),
))
}
})
.err_into::<io::Error>()
.try_flatten_stream()
.boxed() // this `.boxed()` pin the stream.
.into_async_read(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use matches::assert_matches;
const HARDCODED_ENDPOINTS: &[&str] = &[
"https://www.googleapis.com/upload/storage/v1",
"https://www.googleapis.com/storage/v1",
];
#[test]
fn test_change_host() {
let host = StringNonEmpty::static_str("http://localhost:4443");
assert_eq!(
&change_host(&host, &format!("{}/storage/v1/foo", GOOGLE_APIS)).unwrap(),
"http://localhost:4443/storage/v1/foo"
);
let h1 = url::Url::parse(HARDCODED_ENDPOINTS[0]).unwrap();
let h2 = url::Url::parse(HARDCODED_ENDPOINTS[1]).unwrap();
let endpoint = StringNonEmpty::static_str("http://example.com");
assert_eq!(
&change_host(&endpoint, h1.as_str()).unwrap(),
"http://example.com/upload/storage/v1"
);
assert_eq!(
&change_host(&endpoint, h2.as_str()).unwrap(),
"http://example.com/storage/v1"
);
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)).unwrap(),
"http://example.com/storage/v1/foo"
);
assert_matches!(&change_host(&endpoint, "foo"), None);
// if we get the endpoint with suffix "/storage/v1/"
let endpoint = StringNonEmpty::static_str("http://example.com/storage/v1/");
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)). | {
return Err(status_code_error(
res.status(),
"set auth request".to_string(),
));
} | conditional_block |
gcs.rs |
fn deserialize_service_account_info(
cred: StringNonEmpty,
) -> std::result::Result<ServiceAccountInfo, RequestError> {
ServiceAccountInfo::deserialize(cred.to_string())
.map_err(|e| RequestError::OAuth(e, "deserialize ServiceAccountInfo".to_string()))
}
impl BlobConfig for Config {
fn name(&self) -> &'static str {
STORAGE_NAME
}
fn url(&self) -> io::Result<url::Url> {
self.bucket.url("gcs").map_err(|s| {
io::Error::new(
io::ErrorKind::Other,
format!("error creating bucket url: {}", s),
)
})
}
}
// GCS compatible storage
#[derive(Clone)]
pub struct GCSStorage {
config: Config,
svc_access: Option<Arc<ServiceAccountAccess>>,
client: Client<HttpsConnector<HttpConnector>, Body>,
}
trait ResultExt {
type Ok;
// Maps the error of this result as an `std::io::Error` with `Other` error
// kind.
fn or_io_error<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
// Maps the error of this result as an `std::io::Error` with `InvalidInput`
// error kind.
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
}
impl<T, E: Display> ResultExt for Result<T, E> {
type Ok = T;
fn or_io_error<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}: {}", msg, e)))
}
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, format!("{}: {}", msg, e)))
}
}
enum RequestError {
Hyper(hyper::Error, String),
OAuth(tame_oauth::Error, String),
Gcs(tame_gcs::Error),
InvalidEndpoint(http::uri::InvalidUri),
}
impl From<http::uri::InvalidUri> for RequestError {
fn from(err: http::uri::InvalidUri) -> Self {
Self::InvalidEndpoint(err)
}
}
fn status_code_error(code: StatusCode, msg: String) -> RequestError {
RequestError::OAuth(tame_oauth::Error::HttpStatus(code), msg)
}
impl From<RequestError> for io::Error {
fn from(err: RequestError) -> Self {
match err {
RequestError::Hyper(e, msg) => {
Self::new(io::ErrorKind::InvalidInput, format!("HTTP {}: {}", msg, e))
}
RequestError::OAuth(tame_oauth::Error::Io(e), _) => e,
RequestError::OAuth(tame_oauth::Error::HttpStatus(sc), msg) => {
let fmt = format!("GCS OAuth: {}: {}", msg, sc);
match sc.as_u16() {
401 | 403 => Self::new(io::ErrorKind::PermissionDenied, fmt),
404 => Self::new(io::ErrorKind::NotFound, fmt),
_ if sc.is_server_error() => Self::new(io::ErrorKind::Interrupted, fmt),
_ => Self::new(io::ErrorKind::InvalidInput, fmt),
}
}
RequestError::OAuth(tame_oauth::Error::AuthError(e), msg) => Self::new(
io::ErrorKind::PermissionDenied,
format!("authorization failed: {}: {}", msg, e),
),
RequestError::OAuth(e, msg) => Self::new(
io::ErrorKind::InvalidInput,
format!("oauth failed: {}: {}", msg, e),
),
RequestError::Gcs(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS request: {}", e),
),
RequestError::InvalidEndpoint(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS endpoint URI: {}", e),
),
}
}
}
impl RetryError for RequestError {
fn is_retryable(&self) -> bool {
match self {
// FIXME: Inspect the error source?
Self::Hyper(e, _) => {
e.is_closed()
|| e.is_connect()
|| e.is_incomplete_message()
|| e.is_body_write_aborted()
}
// See https://cloud.google.com/storage/docs/exponential-backoff.
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::TOO_MANY_REQUESTS), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::REQUEST_TIMEOUT), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(status), _) => status.is_server_error(),
// Consider everything else not retryable.
_ => false,
}
}
}
impl GCSStorage {
pub fn from_input(input: InputConfig) -> io::Result<Self> {
Self::new(Config::from_input(input)?)
}
pub fn from_cloud_dynamic(cloud_dynamic: &CloudDynamic) -> io::Result<Self> {
Self::new(Config::from_cloud_dynamic(cloud_dynamic)?)
}
/// Create a new GCS storage for the given config.
pub fn new(config: Config) -> io::Result<GCSStorage> {
let svc_access = if let Some(si) = &config.svc_info {
Some(
ServiceAccountAccess::new(si.clone())
.or_invalid_input("invalid credentials_blob")?,
)
} else {
None
};
let client = Client::builder().build(HttpsConnector::new());
Ok(GCSStorage {
config,
svc_access: svc_access.map(Arc::new),
client,
})
}
fn maybe_prefix_key(&self, key: &str) -> String {
if let Some(prefix) = &self.config.bucket.prefix {
return format!("{}/{}", prefix, key);
}
key.to_owned()
}
async fn set_auth(
&self,
req: &mut Request<Body>,
scope: tame_gcs::Scopes,
svc_access: Arc<ServiceAccountAccess>,
) -> Result<(), RequestError> {
let token_or_request = svc_access
.get_token(&[scope])
.map_err(|e| RequestError::OAuth(e, "get_token".to_string()))?;
let token = match token_or_request {
TokenOrRequest::Token(token) => token,
TokenOrRequest::Request {
request,
scope_hash,
..
} => {
let res = self
.client
.request(request.map(From::from))
.await
.map_err(|e| RequestError::Hyper(e, "set auth request".to_owned()))?;
if!res.status().is_success() {
return Err(status_code_error(
res.status(),
"set auth request".to_string(),
));
}
let (parts, body) = res.into_parts();
let body = hyper::body::to_bytes(body)
.await
.map_err(|e| RequestError::Hyper(e, "set auth body".to_owned()))?;
svc_access
.parse_token_response(scope_hash, Response::from_parts(parts, body))
.map_err(|e| RequestError::OAuth(e, "set auth parse token".to_string()))?
}
};
req.headers_mut().insert(
http::header::AUTHORIZATION,
token
.try_into()
.map_err(|e| RequestError::OAuth(e, "set auth add auth header".to_string()))?,
);
Ok(())
}
async fn make_request(
&self,
mut req: Request<Body>,
scope: tame_gcs::Scopes,
) -> Result<Response<Body>, RequestError> {
// replace the hard-coded GCS endpoint by the custom one.
if let Some(endpoint) = &self.config.bucket.endpoint {
let uri = req.uri().to_string();
let new_url_opt = change_host(endpoint, &uri);
if let Some(new_url) = new_url_opt {
*req.uri_mut() = new_url.parse()?;
}
}
if let Some(svc_access) = &self.svc_access {
self.set_auth(&mut req, scope, svc_access.clone()).await?;
}
let uri = req.uri().to_string();
let res = self
.client
.request(req)
.await
.map_err(|e| RequestError::Hyper(e, uri.clone()))?;
if!res.status().is_success() {
return Err(status_code_error(res.status(), uri));
}
Ok(res)
}
fn error_to_async_read<E>(kind: io::ErrorKind, e: E) -> Box<dyn AsyncRead + Unpin>
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Box::new(error_stream(io::Error::new(kind, e)).into_async_read())
}
}
fn change_host(host: &StringNonEmpty, url: &str) -> Option<String> {
let new_host = (|| {
for hardcoded in HARDCODED_ENDPOINTS_SUFFIX {
if let Some(res) = host.strip_suffix(hardcoded) {
return StringNonEmpty::opt(res.to_owned()).unwrap();
}
}
host.to_owned()
})();
if let Some(res) = url.strip_prefix(GOOGLE_APIS) {
return Some([new_host.trim_end_matches('/'), res].concat());
}
None
}
// Convert manually since they don't implement FromStr.
fn parse_storage_class(sc: &str) -> Result<Option<StorageClass>, &str> {
Ok(Some(match sc {
"" => return Ok(None),
"STANDARD" => StorageClass::Standard,
"NEARLINE" => StorageClass::Nearline,
"COLDLINE" => StorageClass::Coldline,
"DURABLE_REDUCED_AVAILABILITY" => StorageClass::DurableReducedAvailability,
"REGIONAL" => StorageClass::Regional,
"MULTI_REGIONAL" => StorageClass::MultiRegional,
_ => return Err(sc),
}))
}
fn parse_predefined_acl(acl: &str) -> Result<Option<PredefinedAcl>, &str> {
Ok(Some(match acl {
"" => return Ok(None),
"authenticatedRead" => PredefinedAcl::AuthenticatedRead,
"bucketOwnerFullControl" => PredefinedAcl::BucketOwnerFullControl,
"bucketOwnerRead" => PredefinedAcl::BucketOwnerRead,
"private" => PredefinedAcl::Private,
"projectPrivate" => PredefinedAcl::ProjectPrivate,
"publicRead" => PredefinedAcl::PublicRead,
_ => return Err(acl),
}))
}
const STORAGE_NAME: &str = "gcs";
impl BlobStorage for GCSStorage {
fn config(&self) -> Box<dyn BlobConfig> {
Box::new(self.config.clone()) as Box<dyn BlobConfig>
}
fn put(
&self,
name: &str,
mut reader: Box<dyn AsyncRead + Send + Unpin>,
content_length: u64,
) -> io::Result<()> {
if content_length == 0 {
// It is probably better to just write the empty file
// However, currently going forward results in a body write aborted error
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"no content to write",
));
}
use std::convert::TryFrom;
let key = self.maybe_prefix_key(name);
debug!("save file to GCS storage"; "key" => %key);
let bucket = BucketName::try_from(self.config.bucket.bucket.to_string())
.or_invalid_input(format_args!("invalid bucket {}", self.config.bucket.bucket))?;
let metadata = Metadata {
name: Some(key),
storage_class: self.config.storage_class,
..Default::default()
};
block_on_external_io(async move {
// FIXME: Switch to upload() API so we don't need to read the entire data into memory
// in order to retry.
let mut data = Vec::with_capacity(content_length as usize);
reader.read_to_end(&mut data).await?;
retry(|| async {
let data = Cursor::new(data.clone());
let req = Object::insert_multipart(
&bucket,
data,
content_length,
&metadata,
Some(InsertObjectOptional {
predefined_acl: self.config.predefined_acl,
..Default::default()
}),
)
.map_err(RequestError::Gcs)?
.map(|reader| Body::wrap_stream(AsyncReadAsSyncStreamOfBytes::new(reader)));
self.make_request(req, tame_gcs::Scopes::ReadWrite).await
})
.await?;
Ok::<_, io::Error>(())
})?;
Ok(())
}
fn get(&self, name: &str) -> Box<dyn AsyncRead + Unpin + '_> {
let bucket = self.config.bucket.bucket.to_string();
let name = self.maybe_prefix_key(name);
debug!("read file from GCS storage"; "key" => %name);
let oid = match ObjectId::new(bucket, name) {
Ok(oid) => oid,
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::InvalidInput, e),
};
let request = match Object::download(&oid, None /*optional*/) {
Ok(request) => request.map(|_: io::Empty| Body::empty()),
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::Other, e),
};
Box::new(
self.make_request(request, tame_gcs::Scopes::ReadOnly)
.and_then(|response| async {
if response.status().is_success() {
Ok(response.into_body().map_err(|e| {
io::Error::new(
io::ErrorKind::Other,
format!("download from GCS error: {}", e),
)
}))
} else {
Err(status_code_error(
response.status(),
"bucket read".to_string(),
))
}
})
.err_into::<io::Error>()
.try_flatten_stream()
.boxed() // this `.boxed()` pin the stream.
.into_async_read(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use matches::assert_matches;
const HARDCODED_ENDPOINTS: &[&str] = &[
"https://www.googleapis.com/upload/storage/v1",
"https://www.googleapis.com/storage/v1",
];
#[test]
fn test_change_host() {
let host = StringNonEmpty::static_str("http://localhost:4443");
assert_eq!(
&change_host(&host, &format!("{}/storage/v1/foo", GOOGLE_APIS)).unwrap(),
"http://localhost:4443/storage/v1/foo"
);
let h1 = url::Url::parse(HARDCODED_ENDPOINTS[0]).unwrap();
let h2 = url::Url::parse(HARDCODED_ENDPOINTS[1]).unwrap();
let endpoint = StringNonEmpty::static_str("http://example.com");
assert_eq!(
&change_host(&endpoint, h1.as_str()).unwrap(),
"http://example.com/upload/storage/v1"
);
assert_eq!(
&change_host(&endpoint, h2.as_str()).unwrap(),
"http://example.com/storage/v1"
);
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)).unwrap(),
"http://example.com/storage/v1/foo"
);
assert_matches!(&change_host(&endpoint, "foo"), None);
// if we get the endpoint with suffix "/storage/v1/"
let endpoint = StringNonEmpty::static_str("http://example.com/storage/v1/");
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)).unwrap(),
"http://example.com/storage/v1/foo"
);
let endpoint = StringNonEmpty::static_str("http://example.com/upload/storage/v1/");
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)).unwrap(),
"http://example.com/storage/v1/foo"
);
}
#[test]
fn test_parse_storage_class() {
assert_matches!(
parse_storage_class("STANDARD"),
Ok(Some(StorageClass::Standard))
);
assert_matches!(parse_storage_class(""), Ok(None));
assert_matches!(
parse_storage_class("NOT_A_STORAGE_CLASS"),
Err("NOT_A_STORAGE_CLASS")
);
}
#[test]
fn test_parse_acl() {
// can't use assert_matches!(), PredefinedAcl doesn't even implement Debug.
assert!(matches!(
parse_predefined_acl("private"),
Ok(Some(PredefinedAcl::Private))
));
assert!(matches!(parse_predefined_acl(""), Ok(None)));
assert!(matches!(parse_predefined_acl("notAnACL"), Err("notAnACL")));
}
#[test]
fn test_url_of_backend() {
let bucket_name = StringNonEmpty::static_str("bucket");
let mut bucket = BucketConf::default(bucket_name);
bucket.prefix = Some(StringNonEmpty::static_str("/backup 02/prefix/"));
let gcs = Config::default(bucket.clone());
// only 'bucket' and 'prefix' should be visible in url_of_backend()
assert_eq!(
gcs.url().unwrap().to_string(),
"gcs://bucket/backup%2002/prefix/"
);
bucket.endpoint = Some(StringNonEmpty::static_str("http://endpoint.com"));
assert_eq!(
&Config::default(bucket).url().unwrap().to_string(),
"http://endpoint.com/bucket/backup%2002/prefix/"
);
}
#[test]
fn | test_config_round_trip | identifier_name |
|
gcs.rs | _predefined_acl(attrs.get("predefined_acl").unwrap_or(def))
.or_invalid_input("invalid predefined_acl")?;
let storage_class = parse_storage_class(&none_to_empty(bucket.storage_class.clone()))
.or_invalid_input("invalid storage_class")?;
let credentials_blob_opt = StringNonEmpty::opt(
attrs
.get("credentials_blob")
.unwrap_or(&"".to_string())
.to_string(),
);
let svc_info = if let Some(cred) = credentials_blob_opt {
Some(deserialize_service_account_info(cred)?)
} else {
None
};
Ok(Config {
bucket,
predefined_acl,
svc_info,
storage_class,
})
}
pub fn from_input(input: InputConfig) -> io::Result<Config> {
let endpoint = StringNonEmpty::opt(input.endpoint);
let bucket = BucketConf {
endpoint,
bucket: StringNonEmpty::required_field(input.bucket, "bucket")?,
prefix: StringNonEmpty::opt(input.prefix),
storage_class: StringNonEmpty::opt(input.storage_class),
region: None,
};
let predefined_acl = parse_predefined_acl(&input.predefined_acl)
.or_invalid_input("invalid predefined_acl")?;
let storage_class = parse_storage_class(&none_to_empty(bucket.storage_class.clone()))
.or_invalid_input("invalid storage_class")?;
let svc_info = if let Some(cred) = StringNonEmpty::opt(input.credentials_blob) {
Some(deserialize_service_account_info(cred)?)
} else {
None
};
Ok(Config {
bucket,
predefined_acl,
svc_info, |
fn deserialize_service_account_info(
cred: StringNonEmpty,
) -> std::result::Result<ServiceAccountInfo, RequestError> {
ServiceAccountInfo::deserialize(cred.to_string())
.map_err(|e| RequestError::OAuth(e, "deserialize ServiceAccountInfo".to_string()))
}
impl BlobConfig for Config {
fn name(&self) -> &'static str {
STORAGE_NAME
}
fn url(&self) -> io::Result<url::Url> {
self.bucket.url("gcs").map_err(|s| {
io::Error::new(
io::ErrorKind::Other,
format!("error creating bucket url: {}", s),
)
})
}
}
// GCS compatible storage
#[derive(Clone)]
pub struct GCSStorage {
config: Config,
svc_access: Option<Arc<ServiceAccountAccess>>,
client: Client<HttpsConnector<HttpConnector>, Body>,
}
trait ResultExt {
type Ok;
// Maps the error of this result as an `std::io::Error` with `Other` error
// kind.
fn or_io_error<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
// Maps the error of this result as an `std::io::Error` with `InvalidInput`
// error kind.
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
}
impl<T, E: Display> ResultExt for Result<T, E> {
type Ok = T;
fn or_io_error<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}: {}", msg, e)))
}
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, format!("{}: {}", msg, e)))
}
}
enum RequestError {
Hyper(hyper::Error, String),
OAuth(tame_oauth::Error, String),
Gcs(tame_gcs::Error),
InvalidEndpoint(http::uri::InvalidUri),
}
impl From<http::uri::InvalidUri> for RequestError {
fn from(err: http::uri::InvalidUri) -> Self {
Self::InvalidEndpoint(err)
}
}
fn status_code_error(code: StatusCode, msg: String) -> RequestError {
RequestError::OAuth(tame_oauth::Error::HttpStatus(code), msg)
}
impl From<RequestError> for io::Error {
fn from(err: RequestError) -> Self {
match err {
RequestError::Hyper(e, msg) => {
Self::new(io::ErrorKind::InvalidInput, format!("HTTP {}: {}", msg, e))
}
RequestError::OAuth(tame_oauth::Error::Io(e), _) => e,
RequestError::OAuth(tame_oauth::Error::HttpStatus(sc), msg) => {
let fmt = format!("GCS OAuth: {}: {}", msg, sc);
match sc.as_u16() {
401 | 403 => Self::new(io::ErrorKind::PermissionDenied, fmt),
404 => Self::new(io::ErrorKind::NotFound, fmt),
_ if sc.is_server_error() => Self::new(io::ErrorKind::Interrupted, fmt),
_ => Self::new(io::ErrorKind::InvalidInput, fmt),
}
}
RequestError::OAuth(tame_oauth::Error::AuthError(e), msg) => Self::new(
io::ErrorKind::PermissionDenied,
format!("authorization failed: {}: {}", msg, e),
),
RequestError::OAuth(e, msg) => Self::new(
io::ErrorKind::InvalidInput,
format!("oauth failed: {}: {}", msg, e),
),
RequestError::Gcs(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS request: {}", e),
),
RequestError::InvalidEndpoint(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS endpoint URI: {}", e),
),
}
}
}
impl RetryError for RequestError {
fn is_retryable(&self) -> bool {
match self {
// FIXME: Inspect the error source?
Self::Hyper(e, _) => {
e.is_closed()
|| e.is_connect()
|| e.is_incomplete_message()
|| e.is_body_write_aborted()
}
// See https://cloud.google.com/storage/docs/exponential-backoff.
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::TOO_MANY_REQUESTS), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::REQUEST_TIMEOUT), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(status), _) => status.is_server_error(),
// Consider everything else not retryable.
_ => false,
}
}
}
impl GCSStorage {
pub fn from_input(input: InputConfig) -> io::Result<Self> {
Self::new(Config::from_input(input)?)
}
pub fn from_cloud_dynamic(cloud_dynamic: &CloudDynamic) -> io::Result<Self> {
Self::new(Config::from_cloud_dynamic(cloud_dynamic)?)
}
/// Create a new GCS storage for the given config.
pub fn new(config: Config) -> io::Result<GCSStorage> {
let svc_access = if let Some(si) = &config.svc_info {
Some(
ServiceAccountAccess::new(si.clone())
.or_invalid_input("invalid credentials_blob")?,
)
} else {
None
};
let client = Client::builder().build(HttpsConnector::new());
Ok(GCSStorage {
config,
svc_access: svc_access.map(Arc::new),
client,
})
}
fn maybe_prefix_key(&self, key: &str) -> String {
if let Some(prefix) = &self.config.bucket.prefix {
return format!("{}/{}", prefix, key);
}
key.to_owned()
}
async fn set_auth(
&self,
req: &mut Request<Body>,
scope: tame_gcs::Scopes,
svc_access: Arc<ServiceAccountAccess>,
) -> Result<(), RequestError> {
let token_or_request = svc_access
.get_token(&[scope])
.map_err(|e| RequestError::OAuth(e, "get_token".to_string()))?;
let token = match token_or_request {
TokenOrRequest::Token(token) => token,
TokenOrRequest::Request {
request,
scope_hash,
..
} => {
let res = self
.client
.request(request.map(From::from))
.await
.map_err(|e| RequestError::Hyper(e, "set auth request".to_owned()))?;
if!res.status().is_success() {
return Err(status_code_error(
res.status(),
"set auth request".to_string(),
));
}
let (parts, body) = res.into_parts();
let body = hyper::body::to_bytes(body)
.await
.map_err(|e| RequestError::Hyper(e, "set auth body".to_owned()))?;
svc_access
.parse_token_response(scope_hash, Response::from_parts(parts, body))
.map_err(|e| RequestError::OAuth(e, "set auth parse token".to_string()))?
}
};
req.headers_mut().insert(
http::header::AUTHORIZATION,
token
.try_into()
.map_err(|e| RequestError::OAuth(e, "set auth add auth header".to_string()))?,
);
Ok(())
}
async fn make_request(
&self,
mut req: Request<Body>,
scope: tame_gcs::Scopes,
) -> Result<Response<Body>, RequestError> {
// replace the hard-coded GCS endpoint by the custom one.
if let Some(endpoint) = &self.config.bucket.endpoint {
let uri = req.uri().to_string();
let new_url_opt = change_host(endpoint, &uri);
if let Some(new_url) = new_url_opt {
*req.uri_mut() = new_url.parse()?;
}
}
if let Some(svc_access) = &self.svc_access {
self.set_auth(&mut req, scope, svc_access.clone()).await?;
}
let uri = req.uri().to_string();
let res = self
.client
.request(req)
.await
.map_err(|e| RequestError::Hyper(e, uri.clone()))?;
if!res.status().is_success() {
return Err(status_code_error(res.status(), uri));
}
Ok(res)
}
fn error_to_async_read<E>(kind: io::ErrorKind, e: E) -> Box<dyn AsyncRead + Unpin>
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Box::new(error_stream(io::Error::new(kind, e)).into_async_read())
}
}
fn change_host(host: &StringNonEmpty, url: &str) -> Option<String> {
let new_host = (|| {
for hardcoded in HARDCODED_ENDPOINTS_SUFFIX {
if let Some(res) = host.strip_suffix(hardcoded) {
return StringNonEmpty::opt(res.to_owned()).unwrap();
}
}
host.to_owned()
})();
if let Some(res) = url.strip_prefix(GOOGLE_APIS) {
return Some([new_host.trim_end_matches('/'), res].concat());
}
None
}
// Convert manually since they don't implement FromStr.
fn parse_storage_class(sc: &str) -> Result<Option<StorageClass>, &str> {
Ok(Some(match sc {
"" => return Ok(None),
"STANDARD" => StorageClass::Standard,
"NEARLINE" => StorageClass::Nearline,
"COLDLINE" => StorageClass::Coldline,
"DURABLE_REDUCED_AVAILABILITY" => StorageClass::DurableReducedAvailability,
"REGIONAL" => StorageClass::Regional,
"MULTI_REGIONAL" => StorageClass::MultiRegional,
_ => return Err(sc),
}))
}
fn parse_predefined_acl(acl: &str) -> Result<Option<PredefinedAcl>, &str> {
Ok(Some(match acl {
"" => return Ok(None),
"authenticatedRead" => PredefinedAcl::AuthenticatedRead,
"bucketOwnerFullControl" => PredefinedAcl::BucketOwnerFullControl,
"bucketOwnerRead" => PredefinedAcl::BucketOwnerRead,
"private" => PredefinedAcl::Private,
"projectPrivate" => PredefinedAcl::ProjectPrivate,
"publicRead" => PredefinedAcl::PublicRead,
_ => return Err(acl),
}))
}
const STORAGE_NAME: &str = "gcs";
impl BlobStorage for GCSStorage {
fn config(&self) -> Box<dyn BlobConfig> {
Box::new(self.config.clone()) as Box<dyn BlobConfig>
}
fn put(
&self,
name: &str,
mut reader: Box<dyn AsyncRead + Send + Unpin>,
content_length: u64,
) -> io::Result<()> {
if content_length == 0 {
// It is probably better to just write the empty file
// However, currently going forward results in a body write aborted error
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"no content to write",
));
}
use std::convert::TryFrom;
let key = self.maybe_prefix_key(name);
debug!("save file to GCS storage"; "key" => %key);
let bucket = BucketName::try_from(self.config.bucket.bucket.to_string())
.or_invalid_input(format_args!("invalid bucket {}", self.config.bucket.bucket))?;
let metadata = Metadata {
name: Some(key),
storage_class: self.config.storage_class,
..Default::default()
};
block_on_external_io(async move {
// FIXME: Switch to upload() API so we don't need to read the entire data into memory
// in order to retry.
let mut data = Vec::with_capacity(content_length as usize);
reader.read_to_end(&mut data).await?;
retry(|| async {
let data = Cursor::new(data.clone());
let req = Object::insert_multipart(
&bucket,
data,
content_length,
&metadata,
Some(InsertObjectOptional {
predefined_acl: self.config.predefined_acl,
..Default::default()
}),
)
.map_err(RequestError::Gcs)?
.map(|reader| Body::wrap_stream(AsyncReadAsSyncStreamOfBytes::new(reader)));
self.make_request(req, tame_gcs::Scopes::ReadWrite).await
})
.await?;
Ok::<_, io::Error>(())
})?;
Ok(())
}
fn get(&self, name: &str) -> Box<dyn AsyncRead + Unpin + '_> {
let bucket = self.config.bucket.bucket.to_string();
let name = self.maybe_prefix_key(name);
debug!("read file from GCS storage"; "key" => %name);
let oid = match ObjectId::new(bucket, name) {
Ok(oid) => oid,
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::InvalidInput, e),
};
let request = match Object::download(&oid, None /*optional*/) {
Ok(request) => request.map(|_: io::Empty| Body::empty()),
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::Other, e),
};
Box::new(
self.make_request(request, tame_gcs::Scopes::ReadOnly)
.and_then(|response| async {
if response.status().is_success() {
Ok(response.into_body().map_err(|e| {
io::Error::new(
io::ErrorKind::Other,
format!("download from GCS error: {}", e),
)
}))
} else {
Err(status_code_error(
response.status(),
"bucket read".to_string(),
))
}
})
.err_into::<io::Error>()
.try_flatten_stream()
.boxed() // this `.boxed()` pin the stream.
.into_async_read(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use matches::assert_matches;
const HARDCODED_ENDPOINTS: &[&str] = &[
"https://www.googleapis.com/upload/storage/v1",
"https://www.googleapis.com/storage/v1",
];
#[test]
fn test_change_host() {
let host = StringNonEmpty::static_str("http://localhost:4443");
assert_eq!(
&change_host(&host, &format!("{}/storage/v1/foo", GOOGLE_APIS)).unwrap(),
"http://localhost:4443/storage/v1/foo"
);
let h1 = url::Url::parse(HARDCODED_ENDPOINTS[0]).unwrap();
let h2 = url::Url::parse(HARDCODED_ENDPOINTS[1]).unwrap();
let endpoint = StringNonEmpty::static_str("http://example.com");
assert_eq!(
&change_host(&endpoint, h1.as_str()).unwrap(),
"http://example.com/upload/storage/v1"
);
assert_eq!(
&change_host(&endpoint, h2.as_str()).unwrap(),
"http://example.com/storage/v1"
);
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)).unwrap(),
"http://example.com/storage/v1/foo"
);
assert_matches!(&change_host(&endpoint, "foo"), None);
// if we get the endpoint with suffix "/storage/v1/"
let endpoint = StringNonEmpty::static_str("http://example.com/storage/v1/");
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)).unwrap | storage_class,
})
}
} | random_line_split |
gcs.rs | _acl(attrs.get("predefined_acl").unwrap_or(def))
.or_invalid_input("invalid predefined_acl")?;
let storage_class = parse_storage_class(&none_to_empty(bucket.storage_class.clone()))
.or_invalid_input("invalid storage_class")?;
let credentials_blob_opt = StringNonEmpty::opt(
attrs
.get("credentials_blob")
.unwrap_or(&"".to_string())
.to_string(),
);
let svc_info = if let Some(cred) = credentials_blob_opt {
Some(deserialize_service_account_info(cred)?)
} else {
None
};
Ok(Config {
bucket,
predefined_acl,
svc_info,
storage_class,
})
}
pub fn from_input(input: InputConfig) -> io::Result<Config> {
let endpoint = StringNonEmpty::opt(input.endpoint);
let bucket = BucketConf {
endpoint,
bucket: StringNonEmpty::required_field(input.bucket, "bucket")?,
prefix: StringNonEmpty::opt(input.prefix),
storage_class: StringNonEmpty::opt(input.storage_class),
region: None,
};
let predefined_acl = parse_predefined_acl(&input.predefined_acl)
.or_invalid_input("invalid predefined_acl")?;
let storage_class = parse_storage_class(&none_to_empty(bucket.storage_class.clone()))
.or_invalid_input("invalid storage_class")?;
let svc_info = if let Some(cred) = StringNonEmpty::opt(input.credentials_blob) {
Some(deserialize_service_account_info(cred)?)
} else {
None
};
Ok(Config {
bucket,
predefined_acl,
svc_info,
storage_class,
})
}
}
fn deserialize_service_account_info(
cred: StringNonEmpty,
) -> std::result::Result<ServiceAccountInfo, RequestError> {
ServiceAccountInfo::deserialize(cred.to_string())
.map_err(|e| RequestError::OAuth(e, "deserialize ServiceAccountInfo".to_string()))
}
impl BlobConfig for Config {
fn name(&self) -> &'static str {
STORAGE_NAME
}
fn url(&self) -> io::Result<url::Url> {
self.bucket.url("gcs").map_err(|s| {
io::Error::new(
io::ErrorKind::Other,
format!("error creating bucket url: {}", s),
)
})
}
}
// GCS compatible storage
#[derive(Clone)]
pub struct GCSStorage {
config: Config,
svc_access: Option<Arc<ServiceAccountAccess>>,
client: Client<HttpsConnector<HttpConnector>, Body>,
}
trait ResultExt {
type Ok;
// Maps the error of this result as an `std::io::Error` with `Other` error
// kind.
fn or_io_error<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
// Maps the error of this result as an `std::io::Error` with `InvalidInput`
// error kind.
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<Self::Ok>;
}
impl<T, E: Display> ResultExt for Result<T, E> {
type Ok = T;
fn or_io_error<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}: {}", msg, e)))
}
fn or_invalid_input<D: Display>(self, msg: D) -> io::Result<T> {
self.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, format!("{}: {}", msg, e)))
}
}
enum RequestError {
Hyper(hyper::Error, String),
OAuth(tame_oauth::Error, String),
Gcs(tame_gcs::Error),
InvalidEndpoint(http::uri::InvalidUri),
}
impl From<http::uri::InvalidUri> for RequestError {
fn from(err: http::uri::InvalidUri) -> Self {
Self::InvalidEndpoint(err)
}
}
fn status_code_error(code: StatusCode, msg: String) -> RequestError {
RequestError::OAuth(tame_oauth::Error::HttpStatus(code), msg)
}
impl From<RequestError> for io::Error {
fn from(err: RequestError) -> Self {
match err {
RequestError::Hyper(e, msg) => {
Self::new(io::ErrorKind::InvalidInput, format!("HTTP {}: {}", msg, e))
}
RequestError::OAuth(tame_oauth::Error::Io(e), _) => e,
RequestError::OAuth(tame_oauth::Error::HttpStatus(sc), msg) => {
let fmt = format!("GCS OAuth: {}: {}", msg, sc);
match sc.as_u16() {
401 | 403 => Self::new(io::ErrorKind::PermissionDenied, fmt),
404 => Self::new(io::ErrorKind::NotFound, fmt),
_ if sc.is_server_error() => Self::new(io::ErrorKind::Interrupted, fmt),
_ => Self::new(io::ErrorKind::InvalidInput, fmt),
}
}
RequestError::OAuth(tame_oauth::Error::AuthError(e), msg) => Self::new(
io::ErrorKind::PermissionDenied,
format!("authorization failed: {}: {}", msg, e),
),
RequestError::OAuth(e, msg) => Self::new(
io::ErrorKind::InvalidInput,
format!("oauth failed: {}: {}", msg, e),
),
RequestError::Gcs(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS request: {}", e),
),
RequestError::InvalidEndpoint(e) => Self::new(
io::ErrorKind::InvalidInput,
format!("invalid GCS endpoint URI: {}", e),
),
}
}
}
impl RetryError for RequestError {
fn is_retryable(&self) -> bool {
match self {
// FIXME: Inspect the error source?
Self::Hyper(e, _) => {
e.is_closed()
|| e.is_connect()
|| e.is_incomplete_message()
|| e.is_body_write_aborted()
}
// See https://cloud.google.com/storage/docs/exponential-backoff.
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::TOO_MANY_REQUESTS), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(StatusCode::REQUEST_TIMEOUT), _) => true,
Self::OAuth(tame_oauth::Error::HttpStatus(status), _) => status.is_server_error(),
// Consider everything else not retryable.
_ => false,
}
}
}
impl GCSStorage {
pub fn from_input(input: InputConfig) -> io::Result<Self> {
Self::new(Config::from_input(input)?)
}
pub fn from_cloud_dynamic(cloud_dynamic: &CloudDynamic) -> io::Result<Self> {
Self::new(Config::from_cloud_dynamic(cloud_dynamic)?)
}
/// Create a new GCS storage for the given config.
pub fn new(config: Config) -> io::Result<GCSStorage> {
let svc_access = if let Some(si) = &config.svc_info {
Some(
ServiceAccountAccess::new(si.clone())
.or_invalid_input("invalid credentials_blob")?,
)
} else {
None
};
let client = Client::builder().build(HttpsConnector::new());
Ok(GCSStorage {
config,
svc_access: svc_access.map(Arc::new),
client,
})
}
fn maybe_prefix_key(&self, key: &str) -> String {
if let Some(prefix) = &self.config.bucket.prefix {
return format!("{}/{}", prefix, key);
}
key.to_owned()
}
async fn set_auth(
&self,
req: &mut Request<Body>,
scope: tame_gcs::Scopes,
svc_access: Arc<ServiceAccountAccess>,
) -> Result<(), RequestError> | ));
}
let (parts, body) = res.into_parts();
let body = hyper::body::to_bytes(body)
.await
.map_err(|e| RequestError::Hyper(e, "set auth body".to_owned()))?;
svc_access
.parse_token_response(scope_hash, Response::from_parts(parts, body))
.map_err(|e| RequestError::OAuth(e, "set auth parse token".to_string()))?
}
};
req.headers_mut().insert(
http::header::AUTHORIZATION,
token
.try_into()
.map_err(|e| RequestError::OAuth(e, "set auth add auth header".to_string()))?,
);
Ok(())
}
async fn make_request(
&self,
mut req: Request<Body>,
scope: tame_gcs::Scopes,
) -> Result<Response<Body>, RequestError> {
// replace the hard-coded GCS endpoint by the custom one.
if let Some(endpoint) = &self.config.bucket.endpoint {
let uri = req.uri().to_string();
let new_url_opt = change_host(endpoint, &uri);
if let Some(new_url) = new_url_opt {
*req.uri_mut() = new_url.parse()?;
}
}
if let Some(svc_access) = &self.svc_access {
self.set_auth(&mut req, scope, svc_access.clone()).await?;
}
let uri = req.uri().to_string();
let res = self
.client
.request(req)
.await
.map_err(|e| RequestError::Hyper(e, uri.clone()))?;
if!res.status().is_success() {
return Err(status_code_error(res.status(), uri));
}
Ok(res)
}
fn error_to_async_read<E>(kind: io::ErrorKind, e: E) -> Box<dyn AsyncRead + Unpin>
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Box::new(error_stream(io::Error::new(kind, e)).into_async_read())
}
}
fn change_host(host: &StringNonEmpty, url: &str) -> Option<String> {
let new_host = (|| {
for hardcoded in HARDCODED_ENDPOINTS_SUFFIX {
if let Some(res) = host.strip_suffix(hardcoded) {
return StringNonEmpty::opt(res.to_owned()).unwrap();
}
}
host.to_owned()
})();
if let Some(res) = url.strip_prefix(GOOGLE_APIS) {
return Some([new_host.trim_end_matches('/'), res].concat());
}
None
}
// Convert manually since they don't implement FromStr.
fn parse_storage_class(sc: &str) -> Result<Option<StorageClass>, &str> {
Ok(Some(match sc {
"" => return Ok(None),
"STANDARD" => StorageClass::Standard,
"NEARLINE" => StorageClass::Nearline,
"COLDLINE" => StorageClass::Coldline,
"DURABLE_REDUCED_AVAILABILITY" => StorageClass::DurableReducedAvailability,
"REGIONAL" => StorageClass::Regional,
"MULTI_REGIONAL" => StorageClass::MultiRegional,
_ => return Err(sc),
}))
}
fn parse_predefined_acl(acl: &str) -> Result<Option<PredefinedAcl>, &str> {
Ok(Some(match acl {
"" => return Ok(None),
"authenticatedRead" => PredefinedAcl::AuthenticatedRead,
"bucketOwnerFullControl" => PredefinedAcl::BucketOwnerFullControl,
"bucketOwnerRead" => PredefinedAcl::BucketOwnerRead,
"private" => PredefinedAcl::Private,
"projectPrivate" => PredefinedAcl::ProjectPrivate,
"publicRead" => PredefinedAcl::PublicRead,
_ => return Err(acl),
}))
}
const STORAGE_NAME: &str = "gcs";
impl BlobStorage for GCSStorage {
fn config(&self) -> Box<dyn BlobConfig> {
Box::new(self.config.clone()) as Box<dyn BlobConfig>
}
fn put(
&self,
name: &str,
mut reader: Box<dyn AsyncRead + Send + Unpin>,
content_length: u64,
) -> io::Result<()> {
if content_length == 0 {
// It is probably better to just write the empty file
// However, currently going forward results in a body write aborted error
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"no content to write",
));
}
use std::convert::TryFrom;
let key = self.maybe_prefix_key(name);
debug!("save file to GCS storage"; "key" => %key);
let bucket = BucketName::try_from(self.config.bucket.bucket.to_string())
.or_invalid_input(format_args!("invalid bucket {}", self.config.bucket.bucket))?;
let metadata = Metadata {
name: Some(key),
storage_class: self.config.storage_class,
..Default::default()
};
block_on_external_io(async move {
// FIXME: Switch to upload() API so we don't need to read the entire data into memory
// in order to retry.
let mut data = Vec::with_capacity(content_length as usize);
reader.read_to_end(&mut data).await?;
retry(|| async {
let data = Cursor::new(data.clone());
let req = Object::insert_multipart(
&bucket,
data,
content_length,
&metadata,
Some(InsertObjectOptional {
predefined_acl: self.config.predefined_acl,
..Default::default()
}),
)
.map_err(RequestError::Gcs)?
.map(|reader| Body::wrap_stream(AsyncReadAsSyncStreamOfBytes::new(reader)));
self.make_request(req, tame_gcs::Scopes::ReadWrite).await
})
.await?;
Ok::<_, io::Error>(())
})?;
Ok(())
}
fn get(&self, name: &str) -> Box<dyn AsyncRead + Unpin + '_> {
let bucket = self.config.bucket.bucket.to_string();
let name = self.maybe_prefix_key(name);
debug!("read file from GCS storage"; "key" => %name);
let oid = match ObjectId::new(bucket, name) {
Ok(oid) => oid,
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::InvalidInput, e),
};
let request = match Object::download(&oid, None /*optional*/) {
Ok(request) => request.map(|_: io::Empty| Body::empty()),
Err(e) => return GCSStorage::error_to_async_read(io::ErrorKind::Other, e),
};
Box::new(
self.make_request(request, tame_gcs::Scopes::ReadOnly)
.and_then(|response| async {
if response.status().is_success() {
Ok(response.into_body().map_err(|e| {
io::Error::new(
io::ErrorKind::Other,
format!("download from GCS error: {}", e),
)
}))
} else {
Err(status_code_error(
response.status(),
"bucket read".to_string(),
))
}
})
.err_into::<io::Error>()
.try_flatten_stream()
.boxed() // this `.boxed()` pin the stream.
.into_async_read(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use matches::assert_matches;
const HARDCODED_ENDPOINTS: &[&str] = &[
"https://www.googleapis.com/upload/storage/v1",
"https://www.googleapis.com/storage/v1",
];
#[test]
fn test_change_host() {
let host = StringNonEmpty::static_str("http://localhost:4443");
assert_eq!(
&change_host(&host, &format!("{}/storage/v1/foo", GOOGLE_APIS)).unwrap(),
"http://localhost:4443/storage/v1/foo"
);
let h1 = url::Url::parse(HARDCODED_ENDPOINTS[0]).unwrap();
let h2 = url::Url::parse(HARDCODED_ENDPOINTS[1]).unwrap();
let endpoint = StringNonEmpty::static_str("http://example.com");
assert_eq!(
&change_host(&endpoint, h1.as_str()).unwrap(),
"http://example.com/upload/storage/v1"
);
assert_eq!(
&change_host(&endpoint, h2.as_str()).unwrap(),
"http://example.com/storage/v1"
);
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)).unwrap(),
"http://example.com/storage/v1/foo"
);
assert_matches!(&change_host(&endpoint, "foo"), None);
// if we get the endpoint with suffix "/storage/v1/"
let endpoint = StringNonEmpty::static_str("http://example.com/storage/v1/");
assert_eq!(
&change_host(&endpoint, &format!("{}/foo", h2)). | {
let token_or_request = svc_access
.get_token(&[scope])
.map_err(|e| RequestError::OAuth(e, "get_token".to_string()))?;
let token = match token_or_request {
TokenOrRequest::Token(token) => token,
TokenOrRequest::Request {
request,
scope_hash,
..
} => {
let res = self
.client
.request(request.map(From::from))
.await
.map_err(|e| RequestError::Hyper(e, "set auth request".to_owned()))?;
if !res.status().is_success() {
return Err(status_code_error(
res.status(),
"set auth request".to_string(), | identifier_body |
lib.rs | //! # nom, eating data byte by byte
//!
//! nom is a parser combinator library with a focus on safe parsing,
//! streaming patterns, and as much as possible zero copy.
//!
//! ## Example
//!
//! ```rust
//! use nom::{
//! IResult,
//! bytes::complete::{tag, take_while_m_n},
//! combinator::map_res,
//! sequence::tuple};
//!
//! #[derive(Debug,PartialEq)]
//! pub struct Color {
//! pub red: u8,
//! pub green: u8,
//! pub blue: u8,
//! }
//!
//! fn from_hex(input: &str) -> Result<u8, std::num::ParseIntError> {
//! u8::from_str_radix(input, 16)
//! }
//!
//! fn is_hex_digit(c: char) -> bool {
//! c.is_digit(16)
//! }
//!
//! fn hex_primary(input: &str) -> IResult<&str, u8> {
//! map_res(
//! take_while_m_n(2, 2, is_hex_digit),
//! from_hex
//! )(input)
//! }
//!
//! fn hex_color(input: &str) -> IResult<&str, Color> {
//! let (input, _) = tag("#")(input)?;
//! let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?;
//!
//! Ok((input, Color { red, green, blue }))
//! }
//!
//! fn main() {
//! assert_eq!(hex_color("#2F14DF"), Ok(("", Color {
//! red: 47,
//! green: 20,
//! blue: 223,
//! })));
//! }
//! ```
//!
//! The code is available on [Github](https://github.com/Geal/nom)
//!
//! There are a few [guides](https://github.com/Geal/nom/tree/master/doc) with more details
//! about [how to write parsers](https://github.com/Geal/nom/blob/master/doc/making_a_new_parser_from_scratch.md),
//! or the [error management system](https://github.com/Geal/nom/blob/master/doc/error_management.md).
//! You can also check out the [recipes] module that contains examples of common patterns.
//!
//! **Looking for a specific combinator? Read the
//! ["choose a combinator" guide](https://github.com/Geal/nom/blob/master/doc/choosing_a_combinator.md)**
//!
//! If you are upgrading to nom 5.0, please read the
//! [migration document](https://github.com/Geal/nom/blob/master/doc/upgrading_to_nom_5.md).
//!
//! ## Parser combinators
//!
//! Parser combinators are an approach to parsers that is very different from
//! software like [lex](https://en.wikipedia.org/wiki/Lex_(software)) and
//! [yacc](https://en.wikipedia.org/wiki/Yacc). Instead of writing the grammar
//! in a separate syntax and generating the corresponding code, you use very small
//! functions with very specific purposes, like "take 5 bytes", or "recognize the
//! word 'HTTP'", and assemble them in meaningful patterns like "recognize
//! 'HTTP', then a space, then a version".
//! The resulting code is small, and looks like the grammar you would have
//! written with other parser approaches.
//!
//! This gives us a few advantages:
//!
//! - The parsers are small and easy to write
//! - The parsers components are easy to reuse (if they're general enough, please add them to nom!)
//! - The parsers components are easy to test separately (unit tests and property-based tests)
//! - The parser combination code looks close to the grammar you would have written
//! - You can build partial parsers, specific to the data you need at the moment, and ignore the rest
//!
//! Here is an example of one such parser, to recognize text between parentheses:
//!
//! ```rust
//! use nom::{
//! IResult,
//! sequence::delimited,
//! // see the "streaming/complete" paragraph lower for an explanation of these submodules
//! character::complete::char,
//! bytes::complete::is_not
//! };
//!
//! fn parens(input: &str) -> IResult<&str, &str> {
//! delimited(char('('), is_not(")"), char(')'))(input)
//! }
//! ```
//!
//! It defines a function named `parens` which will recognize a sequence of the
//! character `(`, the longest byte array not containing `)`, then the character
//! `)`, and will return the byte array in the middle.
//!
//! Here is another parser, written without using nom's combinators this time:
//!
//! ```rust
//! use nom::{IResult, Err, Needed};
//!
//! # fn main() {
//! fn take4(i: &[u8]) -> IResult<&[u8], &[u8]>{
//! if i.len() < 4 {
//! Err(Err::Incomplete(Needed::new(4)))
//! } else {
//! Ok((&i[4..], &i[0..4]))
//! }
//! }
//! # }
//! ```
//!
//! This function takes a byte array as input, and tries to consume 4 bytes.
//! Writing all the parsers manually, like this, is dangerous, despite Rust's
//! safety features. There are still a lot of mistakes one can make. That's why
//! nom provides a list of functions to help in developing parsers.
//!
//! With functions, you would write it like this:
//!
//! ```rust
//! use nom::{IResult, bytes::streaming::take};
//! fn take4(input: &str) -> IResult<&str, &str> {
//! take(4u8)(input)
//! }
//! ```
//!
//! A parser in nom is a function which, for an input type `I`, an output type `O`
//! and an optional error type `E`, will have the following signature:
//!
//! ```rust,compile_fail
//! fn parser(input: I) -> IResult<I, O, E>;
//! ```
//!
//! Or like this, if you don't want to specify a custom error type (it will be `(I, ErrorKind)` by default):
//!
//! ```rust,compile_fail
//! fn parser(input: I) -> IResult<I, O>;
//! ```
//!
//! `IResult` is an alias for the `Result` type:
//!
//! ```rust
//! use nom::{Needed, error::Error};
//!
//! type IResult<I, O, E = Error<I>> = Result<(I, O), Err<E>>;
//!
//! enum Err<E> {
//! Incomplete(Needed),
//! Error(E),
//! Failure(E),
//! }
//! ```
//!
//! It can have the following values:
//!
//! - A correct result `Ok((I,O))` with the first element being the remaining of the input (not parsed yet), and the second the output value;
//! - An error `Err(Err::Error(c))` with `c` an error that can be built from the input position and a parser specific error
//! - An error `Err(Err::Incomplete(Needed))` indicating that more input is necessary. `Needed` can indicate how much data is needed
//! - An error `Err(Err::Failure(c))`. It works like the `Error` case, except it indicates an unrecoverable error: We cannot backtrack and test another parser
//!
//! Please refer to the ["choose a combinator" guide](https://github.com/Geal/nom/blob/master/doc/choosing_a_combinator.md) for an exhaustive list of parsers.
//! See also the rest of the documentation [here](https://github.com/Geal/nom/blob/master/doc).
//!
//! ## Making new parsers with function combinators
//!
//! nom is based on functions that generate parsers, with a signature like
//! this: `(arguments) -> impl Fn(Input) -> IResult<Input, Output, Error>`.
//! The arguments of a combinator can be direct values (like `take` which uses
//! a number of bytes or character as argument) or even other parsers (like
//! `delimited` which takes as argument 3 parsers, and returns the result of
//! the second one if all are successful).
//!
//! Here are some examples:
//!
//! ```rust
//! use nom::IResult;
//! use nom::bytes::complete::{tag, take};
//! fn abcd_parser(i: &str) -> IResult<&str, &str> {
//! tag("abcd")(i) // will consume bytes if the input begins with "abcd"
//! }
//!
//! fn take_10(i: &[u8]) -> IResult<&[u8], &[u8]> {
//! take(10u8)(i) // will consume and return 10 bytes of input
//! }
//! ```
//!
//! ## Combining parsers
//!
//! There are higher level patterns, like the **`alt`** combinator, which
//! provides a choice between multiple parsers. If one branch fails, it tries
//! the next, and returns the result of the first parser that succeeds:
//!
//! ```rust
//! use nom::IResult;
//! use nom::branch::alt;
//! use nom::bytes::complete::tag;
//!
//! let mut alt_tags = alt((tag("abcd"), tag("efgh")));
//!
//! assert_eq!(alt_tags(&b"abcdxxx"[..]), Ok((&b"xxx"[..], &b"abcd"[..])));
//! assert_eq!(alt_tags(&b"efghxxx"[..]), Ok((&b"xxx"[..], &b"efgh"[..])));
//! assert_eq!(alt_tags(&b"ijklxxx"[..]), Err(nom::Err::Error((&b"ijklxxx"[..], nom::error::ErrorKind::Tag))));
//! ```
//!
//! The **`opt`** combinator makes a parser optional. If the child parser returns
//! an error, **`opt`** will still succeed and return None:
//!
//! ```rust
//! use nom::{IResult, combinator::opt, bytes::complete::tag};
//! fn abcd_opt(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> {
//! opt(tag("abcd"))(i)
//! }
//!
//! assert_eq!(abcd_opt(&b"abcdxxx"[..]), Ok((&b"xxx"[..], Some(&b"abcd"[..]))));
//! assert_eq!(abcd_opt(&b"efghxxx"[..]), Ok((&b"efghxxx"[..], None)));
//! ```
//!
//! **`many0`** applies a parser 0 or more times, and returns a vector of the aggregated results:
//!
//! ```rust
//! # #[cfg(feature = "alloc")]
//! # fn main() {
//! use nom::{IResult, multi::many0, bytes::complete::tag};
//! use std::str;
//!
//! fn multi(i: &str) -> IResult<&str, Vec<&str>> {
//! many0(tag("abcd"))(i)
//! }
//!
//! let a = "abcdef";
//! let b = "abcdabcdef";
//! let c = "azerty";
//! assert_eq!(multi(a), Ok(("ef", vec!["abcd"])));
//! assert_eq!(multi(b), Ok(("ef", vec!["abcd", "abcd"])));
//! assert_eq!(multi(c), Ok(("azerty", Vec::new())));
//! # }
//! # #[cfg(not(feature = "alloc"))]
//! # fn main() {}
//! ```
//!
//! Here are some basic combinators available:
//!
//! - **`opt`**: Will make the parser optional (if it returns the `O` type, the new parser returns `Option<O>`) | //! There are more complex (and more useful) parsers like `tuple!`, which is
//! used to apply a series of parsers then assemble their results.
//!
//! Example with `tuple`:
//!
//! ```rust
//! # fn main() {
//! use nom::{error::ErrorKind, Needed,
//! number::streaming::be_u16,
//! bytes::streaming::{tag, take},
//! sequence::tuple};
//!
//! let mut tpl = tuple((be_u16, take(3u8), tag("fg")));
//!
//! assert_eq!(
//! tpl(&b"abcdefgh"[..]),
//! Ok((
//! &b"h"[..],
//! (0x6162u16, &b"cde"[..], &b"fg"[..])
//! ))
//! );
//! assert_eq!(tpl(&b"abcde"[..]), Err(nom::Err::Incomplete(Needed::new(2))));
//! let input = &b"abcdejk"[..];
//! assert_eq!(tpl(input), Err(nom::Err::Error((&input[5..], ErrorKind::Tag))));
//! # }
//! ```
//!
//! But you can also use a sequence of combinators written in imperative style,
//! thanks to the `?` operator:
//!
//! ```rust
//! # fn main() {
//! use nom::{IResult, bytes::complete::tag};
//!
//! #[derive(Debug, PartialEq)]
//! struct A {
//! a: u8,
//! b: u8
//! }
//!
//! fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,1)) }
//! fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,2)) }
//!
//! fn f(i: &[u8]) -> IResult<&[u8], A> {
//! // if successful, the parser returns `Ok((remaining_input, output_value))` that we can destructure
//! let (i, _) = tag("abcd")(i)?;
//! let (i, a) = ret_int1(i)?;
//! let (i, _) = tag("efgh")(i)?;
//! let (i, b) = ret_int2(i)?;
//!
//! Ok((i, A { a, b }))
//! }
//!
//! let r = f(b"abcdefghX");
//! assert_eq!(r, Ok((&b"X"[..], A{a: 1, b: 2})));
//! # }
//! ```
//!
//! ## Streaming / Complete
//!
//! Some of nom's modules have `streaming` or `complete` submodules. They hold
//! different variants of the same combinators.
//!
//! A streaming parser assumes that we might not have all of the input data.
//! This can happen with some network protocol or large file parsers, where the
//! input buffer can be full and need to be resized or refilled.
//!
//! A complete parser assumes that we already have all of the input data.
//! This will be the common case with small files that can be read entirely to
//! memory.
//!
//! Here is how it works in practice:
//!
//! ```rust
//! use nom::{IResult, Err, Needed, error::{Error, ErrorKind}, bytes, character};
//!
//! fn take_streaming(i: &[u8]) -> IResult<&[u8], &[u8]> {
//! bytes::streaming::take(4u8)(i)
//! }
//!
//! fn take_complete(i: &[u8]) -> IResult<&[u8], &[u8]> {
//! bytes::complete::take(4u8)(i)
//! }
//!
//! // both parsers will take 4 bytes as expected
//! assert_eq!(take_streaming(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..])));
//! assert_eq!(take_complete(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..])));
//!
//! // if the input is smaller than 4 bytes, the streaming parser
//! // will return `Incomplete` to indicate that we need more data
//! assert_eq!(take_streaming(&b"abc"[..]), Err(Err::Incomplete(Needed::new(1))));
//!
//! // but the complete parser will return an error
//! assert_eq!(take_complete(&b"abc"[..]), Err(Err::Error(Error::new(&b"abc"[..], ErrorKind::Eof))));
//!
//! // the alpha0 function recognizes 0 or more alphabetic characters
//! fn alpha0_streaming(i: &str) -> IResult<&str, &str> {
//! character::streaming::alpha0(i)
//! }
//!
//! fn alpha0_complete(i: &str) -> IResult<&str, &str> {
//! character::complete::alpha0(i)
//! }
//!
//! // if there's a clear limit to the recognized characters, both parsers work the same way
//! assert_eq!(alpha0_streaming("abcd;"), Ok((";", "abcd")));
//! assert_eq!(alpha0_complete("abcd;"), Ok((";", "abcd")));
//!
//! // but when there's no limit, the streaming version returns `Incomplete`, because it cannot
//! // know if more input data should be recognized. The whole input could be "abcd;", or
//! // "abcde;"
//! assert_eq!(alpha0_streaming("abcd"), Err(Err::Incomplete(Needed::new(1))));
//!
//! // while the complete version knows that all of the data is there
//! assert_eq!(alpha0_complete("abcd"), Ok(("", "abcd")));
//! ```
//! **Going further:** Read the [guides](https://github.com/Geal/nom/tree/master/doc),
//! check out the [recipes]!
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::doc_markdown))]
#![cfg_attr(nightly, feature(test))]
#![cfg_attr(feature = "docsrs", feature(doc_cfg))]
#![cfg_attr(feature = "docsrs", feature(extended_key_value_attributes))]
#![deny(missing_docs)]
#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))]
#[cfg(feature = "alloc")]
#[macro_use]
extern crate alloc;
#[cfg(doctest)]
extern crate doc_comment;
#[cfg(nightly)]
extern crate test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
/// Lib module to re-export everything needed from `std` or `core`/`alloc`. This is how `serde` does
/// it, albeit there it is not public.
#[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))]
pub mod lib {
/// `std` facade allowing `std`/`core` to be interchangeable. Reexports `alloc` crate optionally,
/// as well as `core` or `std`
#[cfg(not(feature = "std"))]
#[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))]
/// internal std exports for no_std compatibility
pub mod std {
#[doc(hidden)]
#[cfg(not(feature = "alloc"))]
pub use core::borrow;
#[cfg(feature = "alloc")]
#[doc(hidden)]
pub use alloc::{borrow, boxed, string, vec};
#[doc(hidden)]
pub use core::{cmp, convert, fmt, iter, mem, ops, option, result, slice, str};
/// internal reproduction of std prelude
#[doc(hidden)]
pub mod prelude {
pub use core::prelude as v1;
}
}
#[cfg(feature = "std")]
#[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))]
/// internal std exports for no_std compatibility
pub mod std {
#[doc(hidden)]
pub use std::{
alloc, borrow, boxed, cmp, collections, convert, fmt, hash, iter, mem, ops, option, result,
slice, str, string, vec,
};
/// internal reproduction of std prelude
#[doc(hidden)]
pub mod prelude {
pub use std::prelude as v1;
}
}
}
pub use self::bits::*;
pub use self::internal::*;
pub use self::traits::*;
pub use self::str::*;
#[macro_use]
pub mod error;
pub mod combinator;
mod internal;
mod traits;
#[macro_use]
pub mod branch;
pub mod multi;
pub mod sequence;
pub mod bits;
pub mod bytes;
pub mod character;
mod str;
pub mod number;
#[cfg(feature = "docsrs")]
#[cfg_attr(feature = "docsrs", cfg_attr(feature = "docsrs", doc = include_str!("../doc/nom_recipes.md")))]
pub mod recipes {} | //! - **`many0`**: Will apply the parser 0 or more times (if it returns the `O` type, the new parser returns `Vec<O>`)
//! - **`many1`**: Will apply the parser 1 or more times
//! | random_line_split |
mod.rs | //! Lexical analysis.
use std::str;
use std::fmt;
use kailua_diag::{Locale, Localize, Localized};
use string::{Name, Str};
/// A token.
#[derive(Clone, Debug, PartialEq)]
pub enum Tok {
/// A token which is distinct from all other tokens.
///
/// The lexer emits this token on an error.
Error,
/// A comment token. The parser should ignore this.
///
/// The shebang line (the first line starting with `#`) is also considered as a comment.
Comment,
/// A punctuation.
Punct(Punct),
/// A keyword.
Keyword(Keyword),
/// A number.
Num(f64),
/// A name (either an identifier or a quoted name in the meta block).
Name(Name),
/// A string (either `"string"` or `[[string]]`).
Str(Str),
/// The end of file.
///
/// A valid stream of tokens is expected to have only one EOF token at the end.
EOF,
}
impl Localize for Tok {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
match (&locale[..], self) {
("ko", &Tok::Error) => write!(f, "잘못된 문자"),
(_, &Tok::Error) => write!(f, "an invalid character"),
("ko", &Tok::Comment) => write!(f, "주석"),
(_, &Tok::Comment) => write!(f, "a comment"),
(_, &Tok::Punct(p)) => write!(f, "{}", Localized::new(&p, locale)),
(_, &Tok::Keyword(w)) => write!(f, "{}", Localized::new(&w, locale)),
("ko", &Tok::Num(_)) => write!(f, "숫자"),
(_, &Tok::Num(_)) => write!(f, "a number"),
("ko", &Tok::Name(_)) => write!(f, "이름"),
(_, &Tok::Name(_)) => write!(f, "a name"),
("ko", &Tok::Str(_)) => write!(f, "문자열 리터럴"),
(_, &Tok::Str(_)) => write!(f, "a string literal"),
("ko", &Tok::EOF) => write!(f, "파일의 끝"),
(_, &Tok::EOF) => write!(f, "the end of file"),
}
}
}
impl<'a> Localize for &'a Tok {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
(**self).fmt_localized(f, locale)
}
}
macro_rules! define_puncts {
($ty:ident |$locale:ident|: $($i:ident $t:expr, #[$m:meta])*) => (
/// A punctuation.
///
/// This includes Kailua-specific punctuations,
/// which are only generated in the meta block (marked as [M] below).
/// Some of them are also only generated after a particular Lua version
/// (marked as [5.x+] below).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum $ty { $(#[$m] $i,)* }
impl Localize for $ty {
fn fmt_localized(&self, f: &mut fmt::Formatter, $locale: Locale) -> fmt::Result {
let text = match *self { $($ty::$i => $t,)* };
fmt::Display::fmt(text, f)
}
}
);
}
define_puncts! { Punct |locale|:
Plus "`+`", /// `+`.
Dash "`-`", /// `-`.
Star "`*`", /// `*`.
Slash "`/`", /// `/`.
Percent "`%`", /// `%`.
Caret "`^`", /// `^`.
Hash "`#`", /// `#`.
EqEq "`==`", /// `==`.
TildeEq "`~=`", /// `~=`.
LtEq "`<=`", /// `<=`.
GtEq "`>=`", /// `>=`.
Lt "`<`", /// `<`.
Gt "`>`", /// `>`.
Eq "`=`", /// `=`.
Amp "`&`", /// `&`. [5.3+]
Tilde "`~`", /// `~`. [5.3+]
Pipe "`|`", /// `|`. [5.3+ or M]
LtLt "`<<`", /// `<<`. [5.3+]
GtGt "`>>`", /// `>>`. [5.3+]
SlashSlash "`//`", /// `//`. [5.3+]
LParen "`(`", /// `(`.
RParen "`)`", /// `)`.
LBrace "`{`", /// `{`.
RBrace "`}`", /// `}`.
LBracket "`[`", /// `[`.
RBracket "`]`", /// `]`.
Semicolon "`;`", /// `;`.
Colon "`:`", /// `:`.
ColonColon "`::`", /// `::`. [5.2+]
Comma "`,`", /// `,`.
Dot "`.`", /// `.`.
DotDot "`..`", /// `..`.
DotDotDot "`...`", /// `...`.
// Kailua extensions | DashDashColon "`--:`", /// `--:`. [M]
DashDashGt "`-->`", /// `-->`. [M]
Ques "`?`", /// `?`. [M]
Bang "`!`", /// `!`. [M]
Newline match &locale[..] { "ko" => "개행문자", _ => "a newline" },
/// A newline. Only generated at the end of the meta block.
}
macro_rules! define_keywords {
($ty:ident: everywhere { $($i:ident $t:expr, #[$m:meta])* }
meta_only { $($mi:ident $mt:expr, #[$mm:meta])* }) => (
/// A keyword.
///
/// This includes Kailua-specific keywords,
/// which are only generated in the meta block (marked as [M] below).
/// Some of them are also only generated after a particular Lua version
/// (marked as [5.x+] below).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum $ty { $(#[$m] $i,)* $(#[$mm] $mi,)* }
impl $ty {
pub fn from(s: &[u8], in_meta: bool) -> Option<Keyword> {
match (in_meta, s) {
$((_, $t) => Some(Keyword::$i),)*
$((true, $mt) => Some(Keyword::$mi),)*
(_, _) => None,
}
}
pub fn name(&self) -> &'static [u8] {
match *self { $($ty::$i => $t,)* $($ty::$mi => $mt,)* }
}
}
);
}
define_keywords! { Keyword:
everywhere {
And b"and", /// `and`.
Break b"break", /// `break`.
Do b"do", /// `do`.
Else b"else", /// `else`.
Elseif b"elseif", /// `elseif`.
End b"end", /// `end`.
False b"false", /// `false`.
For b"for", /// `for`.
Function b"function", /// `function`.
Goto b"goto", /// `goto`. [5.2+; a normal identifier in Lua 5.1]
If b"if", /// `if`.
In b"in", /// `in`.
Local b"local", /// `local`.
Nil b"nil", /// `nil`.
Not b"not", /// `not`.
Or b"or", /// `or`.
Repeat b"repeat", /// `repeat`.
Return b"return", /// `return`.
Then b"then", /// `then`.
True b"true", /// `true`.
Until b"until", /// `until`.
While b"while", /// `while`.
}
meta_only { // Kailua extensions
Assume b"assume", /// `assume`. [M]
Class b"class", /// `class`. [M]
Const b"const", /// `const`. [M]
Global b"global", /// `global`. [M]
Map b"map", /// `map`. [M]
Method b"method", /// `method`. [M]
Module b"module", /// `module`. [M]
Once b"once", /// `once`. [M]
Open b"open", /// `open`. [M]
Static b"static", /// `static`. [M]
Type b"type", /// `type`. [M]
Var b"var", /// `var`. [M]
Vector b"vector", /// `vector`. [M]
}
}
impl From<Keyword> for Str {
fn from(kw: Keyword) -> Str {
kw.name().into()
}
}
impl From<Keyword> for Name {
fn from(kw: Keyword) -> Name {
kw.name().into()
}
}
impl Localize for Keyword {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
let name = str::from_utf8(self.name()).unwrap();
match &locale[..] {
"ko" => write!(f, "예약어 `{}`", name),
_ => write!(f, "a keyword `{}`", name),
}
}
}
mod lexer;
mod nesting;
pub use self::lexer::Lexer;
pub use self::nesting::{Nest, NestedToken, NestingCategory, NestingSerial}; | DashDashHash "`--#`", /// `--#`. [M]
DashDashV "`--v`", /// `--v`. [M] | random_line_split |
mod.rs | //! Lexical analysis.
use std::str;
use std::fmt;
use kailua_diag::{Locale, Localize, Localized};
use string::{Name, Str};
/// A token.
#[derive(Clone, Debug, PartialEq)]
pub enum | {
/// A token which is distinct from all other tokens.
///
/// The lexer emits this token on an error.
Error,
/// A comment token. The parser should ignore this.
///
/// The shebang line (the first line starting with `#`) is also considered as a comment.
Comment,
/// A punctuation.
Punct(Punct),
/// A keyword.
Keyword(Keyword),
/// A number.
Num(f64),
/// A name (either an identifier or a quoted name in the meta block).
Name(Name),
/// A string (either `"string"` or `[[string]]`).
Str(Str),
/// The end of file.
///
/// A valid stream of tokens is expected to have only one EOF token at the end.
EOF,
}
impl Localize for Tok {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
match (&locale[..], self) {
("ko", &Tok::Error) => write!(f, "잘못된 문자"),
(_, &Tok::Error) => write!(f, "an invalid character"),
("ko", &Tok::Comment) => write!(f, "주석"),
(_, &Tok::Comment) => write!(f, "a comment"),
(_, &Tok::Punct(p)) => write!(f, "{}", Localized::new(&p, locale)),
(_, &Tok::Keyword(w)) => write!(f, "{}", Localized::new(&w, locale)),
("ko", &Tok::Num(_)) => write!(f, "숫자"),
(_, &Tok::Num(_)) => write!(f, "a number"),
("ko", &Tok::Name(_)) => write!(f, "이름"),
(_, &Tok::Name(_)) => write!(f, "a name"),
("ko", &Tok::Str(_)) => write!(f, "문자열 리터럴"),
(_, &Tok::Str(_)) => write!(f, "a string literal"),
("ko", &Tok::EOF) => write!(f, "파일의 끝"),
(_, &Tok::EOF) => write!(f, "the end of file"),
}
}
}
impl<'a> Localize for &'a Tok {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
(**self).fmt_localized(f, locale)
}
}
macro_rules! define_puncts {
($ty:ident |$locale:ident|: $($i:ident $t:expr, #[$m:meta])*) => (
/// A punctuation.
///
/// This includes Kailua-specific punctuations,
/// which are only generated in the meta block (marked as [M] below).
/// Some of them are also only generated after a particular Lua version
/// (marked as [5.x+] below).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum $ty { $(#[$m] $i,)* }
impl Localize for $ty {
fn fmt_localized(&self, f: &mut fmt::Formatter, $locale: Locale) -> fmt::Result {
let text = match *self { $($ty::$i => $t,)* };
fmt::Display::fmt(text, f)
}
}
);
}
define_puncts! { Punct |locale|:
Plus "`+`", /// `+`.
Dash "`-`", /// `-`.
Star "`*`", /// `*`.
Slash "`/`", /// `/`.
Percent "`%`", /// `%`.
Caret "`^`", /// `^`.
Hash "`#`", /// `#`.
EqEq "`==`", /// `==`.
TildeEq "`~=`", /// `~=`.
LtEq "`<=`", /// `<=`.
GtEq "`>=`", /// `>=`.
Lt "`<`", /// `<`.
Gt "`>`", /// `>`.
Eq "`=`", /// `=`.
Amp "`&`", /// `&`. [5.3+]
Tilde "`~`", /// `~`. [5.3+]
Pipe "`|`", /// `|`. [5.3+ or M]
LtLt "`<<`", /// `<<`. [5.3+]
GtGt "`>>`", /// `>>`. [5.3+]
SlashSlash "`//`", /// `//`. [5.3+]
LParen "`(`", /// `(`.
RParen "`)`", /// `)`.
LBrace "`{`", /// `{`.
RBrace "`}`", /// `}`.
LBracket "`[`", /// `[`.
RBracket "`]`", /// `]`.
Semicolon "`;`", /// `;`.
Colon "`:`", /// `:`.
ColonColon "`::`", /// `::`. [5.2+]
Comma "`,`", /// `,`.
Dot "`.`", /// `.`.
DotDot "`..`", /// `..`.
DotDotDot "`...`", /// `...`.
// Kailua extensions
DashDashHash "`--#`", /// `--#`. [M]
DashDashV "`--v`", /// `--v`. [M]
DashDashColon "`--:`", /// `--:`. [M]
DashDashGt "`-->`", /// `-->`. [M]
Ques "`?`", /// `?`. [M]
Bang "`!`", /// `!`. [M]
Newline match &locale[..] { "ko" => "개행문자", _ => "a newline" },
/// A newline. Only generated at the end of the meta block.
}
macro_rules! define_keywords {
($ty:ident: everywhere { $($i:ident $t:expr, #[$m:meta])* }
meta_only { $($mi:ident $mt:expr, #[$mm:meta])* }) => (
/// A keyword.
///
/// This includes Kailua-specific keywords,
/// which are only generated in the meta block (marked as [M] below).
/// Some of them are also only generated after a particular Lua version
/// (marked as [5.x+] below).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum $ty { $(#[$m] $i,)* $(#[$mm] $mi,)* }
impl $ty {
pub fn from(s: &[u8], in_meta: bool) -> Option<Keyword> {
match (in_meta, s) {
$((_, $t) => Some(Keyword::$i),)*
$((true, $mt) => Some(Keyword::$mi),)*
(_, _) => None,
}
}
pub fn name(&self) -> &'static [u8] {
match *self { $($ty::$i => $t,)* $($ty::$mi => $mt,)* }
}
}
);
}
define_keywords! { Keyword:
everywhere {
And b"and", /// `and`.
Break b"break", /// `break`.
Do b"do", /// `do`.
Else b"else", /// `else`.
Elseif b"elseif", /// `elseif`.
End b"end", /// `end`.
False b"false", /// `false`.
For b"for", /// `for`.
Function b"function", /// `function`.
Goto b"goto", /// `goto`. [5.2+; a normal identifier in Lua 5.1]
If b"if", /// `if`.
In b"in", /// `in`.
Local b"local", /// `local`.
Nil b"nil", /// `nil`.
Not b"not", /// `not`.
Or b"or", /// `or`.
Repeat b"repeat", /// `repeat`.
Return b"return", /// `return`.
Then b"then", /// `then`.
True b"true", /// `true`.
Until b"until", /// `until`.
While b"while", /// `while`.
}
meta_only { // Kailua extensions
Assume b"assume", /// `assume`. [M]
Class b"class", /// `class`. [M]
Const b"const", /// `const`. [M]
Global b"global", /// `global`. [M]
Map b"map", /// `map`. [M]
Method b"method", /// `method`. [M]
Module b"module", /// `module`. [M]
Once b"once", /// `once`. [M]
Open b"open", /// `open`. [M]
Static b"static", /// `static`. [M]
Type b"type", /// `type`. [M]
Var b"var", /// `var`. [M]
Vector b"vector", /// `vector`. [M]
}
}
impl From<Keyword> for Str {
fn from(kw: Keyword) -> Str {
kw.name().into()
}
}
impl From<Keyword> for Name {
fn from(kw: Keyword) -> Name {
kw.name().into()
}
}
impl Localize for Keyword {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
let name = str::from_utf8(self.name()).unwrap();
match &locale[..] {
"ko" => write!(f, "예약어 `{}`", name),
_ => write!(f, "a keyword `{}`", name),
}
}
}
mod lexer;
mod nesting;
pub use self::lexer::Lexer;
pub use self::nesting::{Nest, NestedToken, NestingCategory, NestingSerial};
| Tok | identifier_name |
mod.rs | //! Lexical analysis.
use std::str;
use std::fmt;
use kailua_diag::{Locale, Localize, Localized};
use string::{Name, Str};
/// A token.
#[derive(Clone, Debug, PartialEq)]
pub enum Tok {
/// A token which is distinct from all other tokens.
///
/// The lexer emits this token on an error.
Error,
/// A comment token. The parser should ignore this.
///
/// The shebang line (the first line starting with `#`) is also considered as a comment.
Comment,
/// A punctuation.
Punct(Punct),
/// A keyword.
Keyword(Keyword),
/// A number.
Num(f64),
/// A name (either an identifier or a quoted name in the meta block).
Name(Name),
/// A string (either `"string"` or `[[string]]`).
Str(Str),
/// The end of file.
///
/// A valid stream of tokens is expected to have only one EOF token at the end.
EOF,
}
impl Localize for Tok {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
match (&locale[..], self) {
("ko", &Tok::Error) => write!(f, "잘못된 문자"),
(_, &Tok::Error) => write!(f, "an invalid character"),
("ko", &Tok::Comment) => write!(f, "주석"),
(_, &Tok::Comment) => write!(f, "a comment"),
(_, &Tok::Punct(p)) => write!(f, "{}", Localized::new(&p, locale)),
(_, &Tok::Keyword(w)) => write!(f, "{}", Localized::new(&w, locale)),
("ko", &Tok::Num(_)) => write!(f, "숫자"),
(_, &Tok::Num(_)) => write!(f, "a number"),
("ko", &Tok::Name(_)) => write!(f, "이름"),
(_, &Tok::Name(_)) => write!(f, "a name"),
("ko", &Tok::Str(_)) => write!(f, "문자열 리터럴"),
(_, &Tok::Str(_)) => write!(f, "a string literal"),
("ko", &Tok::EOF) => write!(f, "파일의 끝"),
(_, &Tok::EOF) => write!(f, "the end of file"),
}
}
}
impl<'a> Localize for &'a Tok {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
(**self).fmt_localized(f, locale)
}
}
macro_rules! define_puncts {
($ty:ident |$locale:ident|: $($i:ident $t:expr, #[$m:meta])*) => (
/// A punctuation.
///
/// This includes Kailua-specific punctuations,
/// which are only generated in the meta block (marked as [M] below).
/// Some of them are also only generated after a particular Lua version
/// (marked as [5.x+] below).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum $ty { $(#[$m] $i,)* }
impl Localize for $ty {
fn fmt_localized(&self, f: &mut fmt::Formatter, $locale: Locale) -> fmt::Result {
let text = match *self { $($ty::$i => $t,)* };
fmt::Display::fmt(text, f)
}
}
);
}
define_puncts! { Punct |locale|:
Plus "`+`", /// `+`.
Dash "`-`", /// `-`.
Star "`*`", /// `*`.
Slash "`/`", /// `/`.
Percent "`%`", /// `%`.
Caret "`^`", /// `^`.
Hash "`#`", /// `#`.
EqEq "`==`", /// `==`.
TildeEq "`~=`", /// `~=`.
LtEq "`<=`", /// `<=`.
GtEq "`>=`", /// `>=`.
Lt "`<`", /// `<`.
Gt "`>`", /// `>`.
Eq "`=`", /// `=`.
Amp "`&`", /// `&`. [5.3+]
Tilde "`~`", /// `~`. [5.3+]
Pipe "`|`", /// `|`. [5.3+ or M]
LtLt "`<<`", /// `<<`. [5.3+]
GtGt "`>>`", /// `>>`. [5.3+]
SlashSlash "`//`", /// `//`. [5.3+]
LParen "`(`", /// `(`.
RParen "`)`", /// `)`.
LBrace "`{`", /// `{`.
RBrace "`}`", /// `}`.
LBracket "`[`", /// `[`.
RBracket "`]`", /// `]`.
Semicolon "`;`", /// `;`.
Colon "`:`", /// `:`.
ColonColon "`::`", /// `::`. [5.2+]
Comma "`,`", /// `,`.
Dot "`.`", /// `.`.
DotDot "`..`", /// `..`.
DotDotDot "`...`", /// `...`.
// Kailua extensions
DashDashHash "`--#`", /// `--#`. [M]
DashDashV "`--v`", /// `--v`. [M]
DashDashColon "`--:`", /// `--:`. [M]
DashDashGt "`-->`", /// `-->`. [M]
Ques "`?`", /// `?`. [M]
Bang "`!`", /// `!`. [M]
Newline match &locale[..] { "ko" => "개행문자", _ => "a newline" },
/// A newline. Only generated at the end of the meta block.
}
macro_rules! define_keywords {
($ty:ident: everywhere { $($i:ident $t:expr, #[$m:meta])* }
meta_only { $($mi:ident $mt:expr, #[$mm:meta])* }) => (
/// A keyword.
///
/// This includes Kailua-specific keywords,
/// which are only generated in the meta block (marked as [M] below).
/// Some of them are also only generated after a particular Lua version
/// (marked as [5.x+] below).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum $ty { $(#[$m] $i,)* $(#[$mm] $mi,)* }
impl $ty {
pub fn from(s: &[u8], in_meta: bool) -> Option<Keyword> {
match (in_meta, s) {
$((_, $t) => Some(Keyword::$i),)*
$((true, $mt) => Some(Keyword::$mi),)*
(_, _) => None,
}
}
pub fn name(&self) -> &'static [u8] {
match *self { $($ty::$i => $t,)* $($ty::$mi => $mt,)* }
}
}
);
}
define_keywords! { Keyword:
everywhere {
And b"and", /// `and`.
Break b"break", /// `break`.
Do b"do", /// `do`.
Else b"else", /// `else`.
Elseif b"elseif", /// `elseif`.
End b"end", /// `end`.
False b"false", /// `false`.
For b"for", /// `for`.
Function b"function", /// `function`.
Goto b"goto", /// `goto`. [5.2+; a normal identifier in Lua 5.1]
If b"if", /// `if`.
In b"in", /// `in`.
Local b"local", /// `local`.
Nil b"nil", /// `nil`.
Not b"not", /// `not`.
Or b"or", /// `or`.
Repeat b"repeat", /// `repeat`.
Return b"return", /// `return`.
Then b"then", /// `then`.
True b"true", /// `true`.
Until b"until", /// `until`.
While b"while", /// `while`.
}
meta_only { // Kailua extensions
Assume b"assume", /// `assume`. [M]
Class b"class", /// `class`. [M]
Const b"const", /// `const`. [M]
Global b"global", /// `global`. [M]
Map b"map", /// `map`. [M]
Method b"method", /// `method`. [M]
Module b"module", /// `module`. [M]
Once b"once", /// `once`. [M]
Open b"open", /// `open`. [M]
Static b"static", /// `static`. [M]
Type b"type", /// `type`. [M]
Var b"var", /// `var`. [M]
Vector b"vector", /// `vector`. [M]
}
}
impl From<Keyword> for Str {
fn from(kw: Keyword) -> Str {
kw.name().into()
}
}
impl From<Keyw | Keyword) -> Name {
kw.name().into()
}
}
impl Localize for Keyword {
fn fmt_localized(&self, f: &mut fmt::Formatter, locale: Locale) -> fmt::Result {
let name = str::from_utf8(self.name()).unwrap();
match &locale[..] {
"ko" => write!(f, "예약어 `{}`", name),
_ => write!(f, "a keyword `{}`", name),
}
}
}
mod lexer;
mod nesting;
pub use self::lexer::Lexer;
pub use self::nesting::{Nest, NestedToken, NestingCategory, NestingSerial};
| ord> for Name {
fn from(kw: | identifier_body |
sha1.rs | // Implements http://rosettacode.org/wiki/SHA-1
// straight port from golang crypto/sha1
// library implementation
#![feature(core)]
use std::num::Wrapping as wr;
use std::slice::bytes::copy_memory;
use std::io::{Write, Result};
// The size of a SHA1 checksum in bytes.
const SIZE: usize = 20;
// The blocksize of SHA1 in bytes.
const CHUNK:usize = 64;
const INIT:[wr<u32>; 5] = [wr(0x67452301),wr(0xEFCDAB89), wr(0x98BADCFE),
wr(0x10325476), wr(0xC3D2E1F0)];
#[cfg(not(test))]
fn main() {
let mut d = Digest::new();
let _ = write!(&mut d, "The quick brown fox jumps over the lazy dog");
let sha1=d.sha1();
for h in &sha1 {
print!("{:x} ", *h);
}
}
// digest represents the partial evaluation of a checksum.
struct Digest {
h: [wr<u32>; 5],
x: [u8; CHUNK],
nx: usize,
len: u64
}
impl Digest {
fn new() -> Digest {
Digest {
h: INIT,
x: [0u8; CHUNK],
nx: 0,
len:0u64
}
}
fn sha1(&mut self) -> [u8; SIZE] {
let mut len = self.len;
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
let mut tmp : [u8; 64] = [0u8; 64];
tmp[0] = 0x80u8;
let m:usize=(len%64u64) as usize;
if m < 56 {
self.write_all(&tmp[0..56-m]).unwrap();
} else {
self.write_all(&tmp[0..64+56-m]).unwrap();
}
// Length in bits (=lengh in bytes*8=shift 3 bits to the right).
len = len << 3;
for i in (0..8) {
tmp[i] = (len >> (56 - 8*i)) as u8;
}
self.write_all(&tmp[0..8]).unwrap();
assert!(self.nx == 0);
let mut digest : [u8; SIZE]=[0u8; SIZE];
for (i, s) in self.h.iter().enumerate() {
digest[i*4] = (*s >> 24).0 as u8;
digest[i*4+1] = (*s >> 16).0 as u8;
digest[i*4+2] = (*s >> 8).0 as u8;
digest[i*4+3] = s.0 as u8;
}
digest
}
fn process_block(&self, data:&[u8]) -> [wr<u32>; 5]{
let k:[u32; 4] = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6];
#[inline]
fn part(a: wr<u32>, b: wr<u32>) -> (wr<u32>, wr<u32>) {
((a<<5 | a>>(32-5)), (b<<30 | b>>(32-30)))
}
let mut w :[u32; 16] = [0u32; 16];
let (mut h0, mut h1, mut h2, mut h3, mut h4) =
(self.h[0], self.h[1], self.h[2], self.h[3], self.h[4]);
let mut p = data;
while p.len() >= CHUNK {
for i in (0..16) {
let j = i * 4;
w[i] = (p[j] as u32)<<24 |
(p[j+1] as u32)<<16 |
(p[j+2] as u32) <<8 |
p[j+3] as u32;
}
let (mut a, mut b, mut c, mut d, mut e) = (h0, h1, h2, h3, h4);
for i in (0..16) {
let f = b & c | (!b) & d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[0]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (16..20) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b & c | (!b) & d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[0]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (20..40) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b ^ c ^ d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[1]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (40..60) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = ((b | c) & d) | (b & c);
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[2]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (60..80) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b ^ c ^ d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[3]);
b=a; a=t; e=d; d=c; c=b30;
}
h0 = h0 + a;
h1 = h1 + b;
h2 = h2 + c;
h3 = h3 + d;
h4 = h4 + e;
p = &p[CHUNK..];
}
[h0, h1, h2, h3, h4]
}
}
impl Write for Digest {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> Result<()> {
let mut buf_m = buf;
self.len += buf_m.len() as u64;
if self.nx > 0 {
let mut n = buf_m.len();
if n > CHUNK - self.nx {
n = CHUNK - self.nx;
}
for i in (0..n) {
self.x[self.nx + i] = *buf_m.get(i).unwrap();
}
self.nx += n;
if self.nx == CHUNK {
let x = &(self.x[..]);
self.h=self.process_block(x);
self.nx = 0;
}
buf_m = &buf_m[n..];
}
if buf_m.len() >= CHUNK {
let n = buf_m.len() &!(CHUNK - 1);
let x = &(self.x[n..]);
self.h=self.process_block(x); | assert!(self.x.len() >= ln);
copy_memory(buf_m, &mut self.x);
self.nx = ln;
}
Ok(())
}
fn flush(&mut self) -> Result<()> { Ok(()) }
}
#[test]
fn known_sha1s() {
let input_output = [
(
"His money is twice tainted: 'taint yours and 'taint mine.",
[0x59u8, 0x7f, 0x6a, 0x54, 0x0, 0x10, 0xf9, 0x4c,
0x15, 0xd7, 0x18, 0x6, 0xa9, 0x9a, 0x2c, 0x87, 0x10,
0xe7, 0x47, 0xbd]
),
(
"The quick brown fox jumps over the lazy dog",
[0x2fu8, 0xd4, 0xe1, 0xc6, 0x7a, 0x2d,
0x28, 0xfc, 0xed, 0x84, 0x9e, 0xe1, 0xbb, 0x76
, 0xe7, 0x39, 0x1b, 0x93, 0xeb, 0x12]
),
(
"The quick brown fox jumps over the lazy cog",
[0xdeu8,0x9f,0x2c,0x7f,0xd2,0x5e,0x1b,0x3a
,0xfa,0xd3,0xe8,0x5a,0x0b,0xd1,0x7d,0x9b
,0x10,0x0d,0xb4,0xb3]
)];
for &(i, o) in &input_output {
let mut d = Digest::new();
let _ = write!(&mut d, "{}", i);
let sha1=d.sha1();
assert_eq!(sha1, o);
}
} | buf_m = &buf_m[n..];
}
let ln=buf_m.len();
if ln > 0 { | random_line_split |
sha1.rs | // Implements http://rosettacode.org/wiki/SHA-1
// straight port from golang crypto/sha1
// library implementation
#![feature(core)]
use std::num::Wrapping as wr;
use std::slice::bytes::copy_memory;
use std::io::{Write, Result};
// The size of a SHA1 checksum in bytes.
const SIZE: usize = 20;
// The blocksize of SHA1 in bytes.
const CHUNK:usize = 64;
const INIT:[wr<u32>; 5] = [wr(0x67452301),wr(0xEFCDAB89), wr(0x98BADCFE),
wr(0x10325476), wr(0xC3D2E1F0)];
#[cfg(not(test))]
fn main() {
let mut d = Digest::new();
let _ = write!(&mut d, "The quick brown fox jumps over the lazy dog");
let sha1=d.sha1();
for h in &sha1 {
print!("{:x} ", *h);
}
}
// digest represents the partial evaluation of a checksum.
struct Digest {
h: [wr<u32>; 5],
x: [u8; CHUNK],
nx: usize,
len: u64
}
impl Digest {
fn new() -> Digest {
Digest {
h: INIT,
x: [0u8; CHUNK],
nx: 0,
len:0u64
}
}
fn sha1(&mut self) -> [u8; SIZE] {
let mut len = self.len;
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
let mut tmp : [u8; 64] = [0u8; 64];
tmp[0] = 0x80u8;
let m:usize=(len%64u64) as usize;
if m < 56 {
self.write_all(&tmp[0..56-m]).unwrap();
} else {
self.write_all(&tmp[0..64+56-m]).unwrap();
}
// Length in bits (=lengh in bytes*8=shift 3 bits to the right).
len = len << 3;
for i in (0..8) {
tmp[i] = (len >> (56 - 8*i)) as u8;
}
self.write_all(&tmp[0..8]).unwrap();
assert!(self.nx == 0);
let mut digest : [u8; SIZE]=[0u8; SIZE];
for (i, s) in self.h.iter().enumerate() {
digest[i*4] = (*s >> 24).0 as u8;
digest[i*4+1] = (*s >> 16).0 as u8;
digest[i*4+2] = (*s >> 8).0 as u8;
digest[i*4+3] = s.0 as u8;
}
digest
}
fn process_block(&self, data:&[u8]) -> [wr<u32>; 5]{
let k:[u32; 4] = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6];
#[inline]
fn part(a: wr<u32>, b: wr<u32>) -> (wr<u32>, wr<u32>) {
((a<<5 | a>>(32-5)), (b<<30 | b>>(32-30)))
}
let mut w :[u32; 16] = [0u32; 16];
let (mut h0, mut h1, mut h2, mut h3, mut h4) =
(self.h[0], self.h[1], self.h[2], self.h[3], self.h[4]);
let mut p = data;
while p.len() >= CHUNK {
for i in (0..16) {
let j = i * 4;
w[i] = (p[j] as u32)<<24 |
(p[j+1] as u32)<<16 |
(p[j+2] as u32) <<8 |
p[j+3] as u32;
}
let (mut a, mut b, mut c, mut d, mut e) = (h0, h1, h2, h3, h4);
for i in (0..16) {
let f = b & c | (!b) & d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[0]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (16..20) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b & c | (!b) & d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[0]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (20..40) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b ^ c ^ d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[1]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (40..60) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = ((b | c) & d) | (b & c);
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[2]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (60..80) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b ^ c ^ d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[3]);
b=a; a=t; e=d; d=c; c=b30;
}
h0 = h0 + a;
h1 = h1 + b;
h2 = h2 + c;
h3 = h3 + d;
h4 = h4 + e;
p = &p[CHUNK..];
}
[h0, h1, h2, h3, h4]
}
}
impl Write for Digest {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> Result<()> {
let mut buf_m = buf;
self.len += buf_m.len() as u64;
if self.nx > 0 {
let mut n = buf_m.len();
if n > CHUNK - self.nx {
n = CHUNK - self.nx;
}
for i in (0..n) {
self.x[self.nx + i] = *buf_m.get(i).unwrap();
}
self.nx += n;
if self.nx == CHUNK {
let x = &(self.x[..]);
self.h=self.process_block(x);
self.nx = 0;
}
buf_m = &buf_m[n..];
}
if buf_m.len() >= CHUNK {
let n = buf_m.len() &!(CHUNK - 1);
let x = &(self.x[n..]);
self.h=self.process_block(x);
buf_m = &buf_m[n..];
}
let ln=buf_m.len();
if ln > 0 |
Ok(())
}
fn flush(&mut self) -> Result<()> { Ok(()) }
}
#[test]
fn known_sha1s() {
let input_output = [
(
"His money is twice tainted: 'taint yours and 'taint mine.",
[0x59u8, 0x7f, 0x6a, 0x54, 0x0, 0x10, 0xf9, 0x4c,
0x15, 0xd7, 0x18, 0x6, 0xa9, 0x9a, 0x2c, 0x87, 0x10,
0xe7, 0x47, 0xbd]
),
(
"The quick brown fox jumps over the lazy dog",
[0x2fu8, 0xd4, 0xe1, 0xc6, 0x7a, 0x2d,
0x28, 0xfc, 0xed, 0x84, 0x9e, 0xe1, 0xbb, 0x76
, 0xe7, 0x39, 0x1b, 0x93, 0xeb, 0x12]
),
(
"The quick brown fox jumps over the lazy cog",
[0xdeu8,0x9f,0x2c,0x7f,0xd2,0x5e,0x1b,0x3a
,0xfa,0xd3,0xe8,0x5a,0x0b,0xd1,0x7d,0x9b
,0x10,0x0d,0xb4,0xb3]
)];
for &(i, o) in &input_output {
let mut d = Digest::new();
let _ = write!(&mut d, "{}", i);
let sha1=d.sha1();
assert_eq!(sha1, o);
}
}
| {
assert!(self.x.len() >= ln);
copy_memory(buf_m, &mut self.x);
self.nx = ln;
} | conditional_block |
sha1.rs | // Implements http://rosettacode.org/wiki/SHA-1
// straight port from golang crypto/sha1
// library implementation
#![feature(core)]
use std::num::Wrapping as wr;
use std::slice::bytes::copy_memory;
use std::io::{Write, Result};
// The size of a SHA1 checksum in bytes.
const SIZE: usize = 20;
// The blocksize of SHA1 in bytes.
const CHUNK:usize = 64;
const INIT:[wr<u32>; 5] = [wr(0x67452301),wr(0xEFCDAB89), wr(0x98BADCFE),
wr(0x10325476), wr(0xC3D2E1F0)];
#[cfg(not(test))]
fn main() {
let mut d = Digest::new();
let _ = write!(&mut d, "The quick brown fox jumps over the lazy dog");
let sha1=d.sha1();
for h in &sha1 {
print!("{:x} ", *h);
}
}
// digest represents the partial evaluation of a checksum.
struct Digest {
h: [wr<u32>; 5],
x: [u8; CHUNK],
nx: usize,
len: u64
}
impl Digest {
fn new() -> Digest {
Digest {
h: INIT,
x: [0u8; CHUNK],
nx: 0,
len:0u64
}
}
fn sha1(&mut self) -> [u8; SIZE] {
let mut len = self.len;
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
let mut tmp : [u8; 64] = [0u8; 64];
tmp[0] = 0x80u8;
let m:usize=(len%64u64) as usize;
if m < 56 {
self.write_all(&tmp[0..56-m]).unwrap();
} else {
self.write_all(&tmp[0..64+56-m]).unwrap();
}
// Length in bits (=lengh in bytes*8=shift 3 bits to the right).
len = len << 3;
for i in (0..8) {
tmp[i] = (len >> (56 - 8*i)) as u8;
}
self.write_all(&tmp[0..8]).unwrap();
assert!(self.nx == 0);
let mut digest : [u8; SIZE]=[0u8; SIZE];
for (i, s) in self.h.iter().enumerate() {
digest[i*4] = (*s >> 24).0 as u8;
digest[i*4+1] = (*s >> 16).0 as u8;
digest[i*4+2] = (*s >> 8).0 as u8;
digest[i*4+3] = s.0 as u8;
}
digest
}
fn process_block(&self, data:&[u8]) -> [wr<u32>; 5]{
let k:[u32; 4] = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6];
#[inline]
fn part(a: wr<u32>, b: wr<u32>) -> (wr<u32>, wr<u32>) {
((a<<5 | a>>(32-5)), (b<<30 | b>>(32-30)))
}
let mut w :[u32; 16] = [0u32; 16];
let (mut h0, mut h1, mut h2, mut h3, mut h4) =
(self.h[0], self.h[1], self.h[2], self.h[3], self.h[4]);
let mut p = data;
while p.len() >= CHUNK {
for i in (0..16) {
let j = i * 4;
w[i] = (p[j] as u32)<<24 |
(p[j+1] as u32)<<16 |
(p[j+2] as u32) <<8 |
p[j+3] as u32;
}
let (mut a, mut b, mut c, mut d, mut e) = (h0, h1, h2, h3, h4);
for i in (0..16) {
let f = b & c | (!b) & d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[0]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (16..20) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b & c | (!b) & d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[0]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (20..40) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b ^ c ^ d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[1]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (40..60) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = ((b | c) & d) | (b & c);
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[2]);
b=a; a=t; e=d; d=c; c=b30;
}
for i in (60..80) {
let tmp = w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf];
w[i&0xf] = tmp<<1 | tmp>>(32-1);
let f = b ^ c ^ d;
let (a5, b30) = part(a, b);
let t = a5 + f + e + wr(w[i&0xf]) + wr(k[3]);
b=a; a=t; e=d; d=c; c=b30;
}
h0 = h0 + a;
h1 = h1 + b;
h2 = h2 + c;
h3 = h3 + d;
h4 = h4 + e;
p = &p[CHUNK..];
}
[h0, h1, h2, h3, h4]
}
}
impl Write for Digest {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> Result<()> {
let mut buf_m = buf;
self.len += buf_m.len() as u64;
if self.nx > 0 {
let mut n = buf_m.len();
if n > CHUNK - self.nx {
n = CHUNK - self.nx;
}
for i in (0..n) {
self.x[self.nx + i] = *buf_m.get(i).unwrap();
}
self.nx += n;
if self.nx == CHUNK {
let x = &(self.x[..]);
self.h=self.process_block(x);
self.nx = 0;
}
buf_m = &buf_m[n..];
}
if buf_m.len() >= CHUNK {
let n = buf_m.len() &!(CHUNK - 1);
let x = &(self.x[n..]);
self.h=self.process_block(x);
buf_m = &buf_m[n..];
}
let ln=buf_m.len();
if ln > 0 {
assert!(self.x.len() >= ln);
copy_memory(buf_m, &mut self.x);
self.nx = ln;
}
Ok(())
}
fn | (&mut self) -> Result<()> { Ok(()) }
}
#[test]
fn known_sha1s() {
let input_output = [
(
"His money is twice tainted: 'taint yours and 'taint mine.",
[0x59u8, 0x7f, 0x6a, 0x54, 0x0, 0x10, 0xf9, 0x4c,
0x15, 0xd7, 0x18, 0x6, 0xa9, 0x9a, 0x2c, 0x87, 0x10,
0xe7, 0x47, 0xbd]
),
(
"The quick brown fox jumps over the lazy dog",
[0x2fu8, 0xd4, 0xe1, 0xc6, 0x7a, 0x2d,
0x28, 0xfc, 0xed, 0x84, 0x9e, 0xe1, 0xbb, 0x76
, 0xe7, 0x39, 0x1b, 0x93, 0xeb, 0x12]
),
(
"The quick brown fox jumps over the lazy cog",
[0xdeu8,0x9f,0x2c,0x7f,0xd2,0x5e,0x1b,0x3a
,0xfa,0xd3,0xe8,0x5a,0x0b,0xd1,0x7d,0x9b
,0x10,0x0d,0xb4,0xb3]
)];
for &(i, o) in &input_output {
let mut d = Digest::new();
let _ = write!(&mut d, "{}", i);
let sha1=d.sha1();
assert_eq!(sha1, o);
}
}
| flush | identifier_name |
alignment-gep-tup-like-1.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct | <A,B> {
a: A, b: B
}
fn f<A:Copy +'static>(a: A, b: u16) -> @fn() -> (A, u16) {
let result: @fn() -> (A, u16) = || (copy a, b);
result
}
pub fn main() {
let (a, b) = f(22_u64, 44u16)();
info!("a=%? b=%?", a, b);
assert_eq!(a, 22u64);
assert_eq!(b, 44u16);
}
| pair | identifier_name |
alignment-gep-tup-like-1.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct pair<A,B> {
a: A, b: B
}
fn f<A:Copy +'static>(a: A, b: u16) -> @fn() -> (A, u16) {
let result: @fn() -> (A, u16) = || (copy a, b);
result | pub fn main() {
let (a, b) = f(22_u64, 44u16)();
info!("a=%? b=%?", a, b);
assert_eq!(a, 22u64);
assert_eq!(b, 44u16);
} | }
| random_line_split |
glyph.rs | // This whole file is strongly inspired by: https://github.com/jeaye/q3/blob/master/src/client/ui/ttf/glyph.rs
// available under the BSD-3 licence.
// It has been modified to work with gl-rs, nalgebra, and rust-freetype
use na::Vector2;
/// A ttf glyph.
pub struct | {
#[doc(hidden)]
pub tex: Vector2<f32>,
#[doc(hidden)]
pub advance: Vector2<f32>,
#[doc(hidden)]
pub dimensions: Vector2<f32>,
#[doc(hidden)]
pub offset: Vector2<f32>,
#[doc(hidden)]
pub buffer: Vec<u8>,
}
impl Glyph {
/// Creates a new empty glyph.
pub fn new(
tex: Vector2<f32>,
advance: Vector2<f32>,
dimensions: Vector2<f32>,
offset: Vector2<f32>,
buffer: Vec<u8>,
) -> Glyph {
Glyph {
tex,
advance,
dimensions,
offset,
buffer,
}
}
}
| Glyph | identifier_name |
glyph.rs | // This whole file is strongly inspired by: https://github.com/jeaye/q3/blob/master/src/client/ui/ttf/glyph.rs
// available under the BSD-3 licence.
// It has been modified to work with gl-rs, nalgebra, and rust-freetype
use na::Vector2;
/// A ttf glyph.
pub struct Glyph {
#[doc(hidden)]
pub tex: Vector2<f32>,
#[doc(hidden)]
pub advance: Vector2<f32>,
#[doc(hidden)]
pub dimensions: Vector2<f32>,
#[doc(hidden)]
pub offset: Vector2<f32>,
#[doc(hidden)]
pub buffer: Vec<u8>,
}
impl Glyph {
/// Creates a new empty glyph.
pub fn new(
tex: Vector2<f32>,
advance: Vector2<f32>,
dimensions: Vector2<f32>,
offset: Vector2<f32>,
buffer: Vec<u8>,
) -> Glyph |
}
| {
Glyph {
tex,
advance,
dimensions,
offset,
buffer,
}
} | identifier_body |
glyph.rs | // This whole file is strongly inspired by: https://github.com/jeaye/q3/blob/master/src/client/ui/ttf/glyph.rs
// available under the BSD-3 licence.
// It has been modified to work with gl-rs, nalgebra, and rust-freetype
use na::Vector2;
/// A ttf glyph.
pub struct Glyph {
#[doc(hidden)]
pub tex: Vector2<f32>,
#[doc(hidden)]
pub advance: Vector2<f32>,
#[doc(hidden)]
pub dimensions: Vector2<f32>,
#[doc(hidden)]
pub offset: Vector2<f32>,
#[doc(hidden)]
pub buffer: Vec<u8>,
}
impl Glyph {
/// Creates a new empty glyph.
pub fn new(
tex: Vector2<f32>,
advance: Vector2<f32>,
dimensions: Vector2<f32>,
offset: Vector2<f32>,
buffer: Vec<u8>,
) -> Glyph {
Glyph {
tex,
advance,
dimensions,
offset,
buffer, | }
}
} | random_line_split |
|
peripheral.rs | use bare_metal::{CriticalSection, Mutex};
use once_cell::unsync::OnceCell;
static PERIPHERALS: Mutex<OnceCell<At2XtPeripherals>> = Mutex::new(OnceCell::new());
pub struct At2XtPeripherals {
pub port: msp430g2211::PORT_1_2,
pub timer: msp430g2211::TIMER_A2,
}
impl AsRef<msp430g2211::PORT_1_2> for At2XtPeripherals {
fn as_ref(&self) -> &msp430g2211::PORT_1_2 {
&self.port
}
}
impl AsRef<msp430g2211::TIMER_A2> for At2XtPeripherals {
fn as_ref(&self) -> &msp430g2211::TIMER_A2 {
&self.timer |
impl At2XtPeripherals {
pub fn init(self, cs: &CriticalSection) -> Result<(), ()> {
// We want to consume our Peripherals struct so interrupts
// and the main thread can access the peripherals; OnceCell
// returns the data to you on error.
PERIPHERALS.borrow(*cs).set(self).map_err(|_e| {})
}
pub fn periph_ref<'a, T>(cs: &'a CriticalSection) -> Option<&'a T>
where
Self: AsRef<T>,
{
PERIPHERALS.borrow(*cs).get().map(|p| p.as_ref())
}
} | }
} | random_line_split |
peripheral.rs | use bare_metal::{CriticalSection, Mutex};
use once_cell::unsync::OnceCell;
static PERIPHERALS: Mutex<OnceCell<At2XtPeripherals>> = Mutex::new(OnceCell::new());
pub struct | {
pub port: msp430g2211::PORT_1_2,
pub timer: msp430g2211::TIMER_A2,
}
impl AsRef<msp430g2211::PORT_1_2> for At2XtPeripherals {
fn as_ref(&self) -> &msp430g2211::PORT_1_2 {
&self.port
}
}
impl AsRef<msp430g2211::TIMER_A2> for At2XtPeripherals {
fn as_ref(&self) -> &msp430g2211::TIMER_A2 {
&self.timer
}
}
impl At2XtPeripherals {
pub fn init(self, cs: &CriticalSection) -> Result<(), ()> {
// We want to consume our Peripherals struct so interrupts
// and the main thread can access the peripherals; OnceCell
// returns the data to you on error.
PERIPHERALS.borrow(*cs).set(self).map_err(|_e| {})
}
pub fn periph_ref<'a, T>(cs: &'a CriticalSection) -> Option<&'a T>
where
Self: AsRef<T>,
{
PERIPHERALS.borrow(*cs).get().map(|p| p.as_ref())
}
}
| At2XtPeripherals | identifier_name |
dst-rvalue.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that dynamically sized rvalues are forbidden
#![feature(box_syntax)]
pub fn | () {
let _x: Box<str> = box *"hello world";
//~^ ERROR E0161
//~^^ ERROR cannot move out of borrowed content
let array: &[isize] = &[1, 2, 3];
let _x: Box<[isize]> = box *array;
//~^ ERROR E0161
//~^^ ERROR cannot move out of borrowed content
}
| main | identifier_name |
dst-rvalue.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that dynamically sized rvalues are forbidden
#![feature(box_syntax)]
pub fn main() {
let _x: Box<str> = box *"hello world"; | //~^ ERROR E0161
//~^^ ERROR cannot move out of borrowed content
let array: &[isize] = &[1, 2, 3];
let _x: Box<[isize]> = box *array;
//~^ ERROR E0161
//~^^ ERROR cannot move out of borrowed content
} | random_line_split |
|
dst-rvalue.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that dynamically sized rvalues are forbidden
#![feature(box_syntax)]
pub fn main() | {
let _x: Box<str> = box *"hello world";
//~^ ERROR E0161
//~^^ ERROR cannot move out of borrowed content
let array: &[isize] = &[1, 2, 3];
let _x: Box<[isize]> = box *array;
//~^ ERROR E0161
//~^^ ERROR cannot move out of borrowed content
} | identifier_body |
|
textattributes.rs | // This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>. | use gtk::ffi;
pub struct TextAttributes {
pointer: *mut ffi::C_GtkTextAttributes
}
impl TextAttributes {
pub fn new() -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_new() };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
pub fn copy(&self) -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_copy(self.pointer) };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
pub fn copy_values_from(&self, src: &TextAttributes) {
unsafe { ffi::gtk_text_attributes_copy_values(src.pointer, self.pointer) }
}
pub fn unref(&self) {
unsafe { ffi::gtk_text_attributes_unref(self.pointer) }
}
pub fn _ref(&self) -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_ref(self.pointer) };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
}
impl_GObjectFunctions!(TextAttributes, C_GtkTextAttributes) |
//! GtkTextTag — A tag that can be applied to text in a GtkTextBuffer
| random_line_split |
textattributes.rs | // This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
//! GtkTextTag — A tag that can be applied to text in a GtkTextBuffer
use gtk::ffi;
pub struct TextAttributes {
pointer: *mut ffi::C_GtkTextAttributes
}
impl TextAttributes {
pub fn new() -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_new() };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
pub fn copy(&self) -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_copy(self.pointer) };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
pub fn copy_values_from(&self, src: &TextAttributes) {
unsafe { ffi::gtk_text_attributes_copy_values(src.pointer, self.pointer) }
}
pub fn unref(&self) {
unsafe { ffi::gtk_text_attributes_unref(self.pointer) }
}
pub fn _ref(&self) -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_ref(self.pointer) };
if tmp_pointer.is_null() {
None
} else {
| }
}
impl_GObjectFunctions!(TextAttributes, C_GtkTextAttributes) | Some(TextAttributes { pointer : tmp_pointer })
}
| conditional_block |
textattributes.rs | // This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
//! GtkTextTag — A tag that can be applied to text in a GtkTextBuffer
use gtk::ffi;
pub struct TextAttributes {
pointer: *mut ffi::C_GtkTextAttributes
}
impl TextAttributes {
pub fn new() -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_new() };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
pub fn copy(&self) -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_copy(self.pointer) };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
pub fn copy_values_from(&self, src: &TextAttributes) {
unsafe { ffi::gtk_text_attributes_copy_values(src.pointer, self.pointer) }
}
pub fn unref(&self) {
unsafe { ffi::gtk_text_attributes_unref(self.pointer) }
}
pub fn _r | self) -> Option<TextAttributes> {
let tmp_pointer = unsafe { ffi::gtk_text_attributes_ref(self.pointer) };
if tmp_pointer.is_null() {
None
} else {
Some(TextAttributes { pointer : tmp_pointer })
}
}
}
impl_GObjectFunctions!(TextAttributes, C_GtkTextAttributes) | ef(& | identifier_name |
f10-read-timeout.rs | /// Figure 10.10: Calling read with a timeout
///
/// Takeaway: First I tried with the regular `signal` function of libc
/// only to find out that the alarm signal does not interrupt the read
/// call. Digging into the C code it got obvious that the signal function
/// gets overriden by `lib/signal.c` which is a "reliable version of signal(),
/// using POSIX sigaction()". But this function gets only introduced in
/// Figure 10.18. This was quite misleading IMO.
///
/// $ f10-read-timeout 2>&1
/// read error!
/// ERROR: return code 1
extern crate libc;
#[macro_use(as_void)]
extern crate apue;
use libc::{STDOUT_FILENO, STDIN_FILENO, SIGALRM, SIG_ERR, c_int};
use libc::{alarm, write, read, exit};
use apue::signal;
const MAXLINE: usize = 4096;
fn sig_alrm(_: c_int) {
// nothing to do, just return to interrupt the read
}
fn main() {
unsafe {
let line: [u8; MAXLINE] = std::mem::uninitialized();
if signal(SIGALRM, sig_alrm) == SIG_ERR {
panic!("signal(SIGALRM) error");
}
alarm(1);
let n = read(STDIN_FILENO, as_void!(line), MAXLINE);
if n < 0 |
alarm(0);
write(STDOUT_FILENO, as_void!(line), n as _);
}
}
| {
println!("read error!");
exit(1);
} | conditional_block |
f10-read-timeout.rs | /// Figure 10.10: Calling read with a timeout
///
/// Takeaway: First I tried with the regular `signal` function of libc
/// only to find out that the alarm signal does not interrupt the read
/// call. Digging into the C code it got obvious that the signal function | /// using POSIX sigaction()". But this function gets only introduced in
/// Figure 10.18. This was quite misleading IMO.
///
/// $ f10-read-timeout 2>&1
/// read error!
/// ERROR: return code 1
extern crate libc;
#[macro_use(as_void)]
extern crate apue;
use libc::{STDOUT_FILENO, STDIN_FILENO, SIGALRM, SIG_ERR, c_int};
use libc::{alarm, write, read, exit};
use apue::signal;
const MAXLINE: usize = 4096;
fn sig_alrm(_: c_int) {
// nothing to do, just return to interrupt the read
}
fn main() {
unsafe {
let line: [u8; MAXLINE] = std::mem::uninitialized();
if signal(SIGALRM, sig_alrm) == SIG_ERR {
panic!("signal(SIGALRM) error");
}
alarm(1);
let n = read(STDIN_FILENO, as_void!(line), MAXLINE);
if n < 0 {
println!("read error!");
exit(1);
}
alarm(0);
write(STDOUT_FILENO, as_void!(line), n as _);
}
} | /// gets overriden by `lib/signal.c` which is a "reliable version of signal(), | random_line_split |
f10-read-timeout.rs | /// Figure 10.10: Calling read with a timeout
///
/// Takeaway: First I tried with the regular `signal` function of libc
/// only to find out that the alarm signal does not interrupt the read
/// call. Digging into the C code it got obvious that the signal function
/// gets overriden by `lib/signal.c` which is a "reliable version of signal(),
/// using POSIX sigaction()". But this function gets only introduced in
/// Figure 10.18. This was quite misleading IMO.
///
/// $ f10-read-timeout 2>&1
/// read error!
/// ERROR: return code 1
extern crate libc;
#[macro_use(as_void)]
extern crate apue;
use libc::{STDOUT_FILENO, STDIN_FILENO, SIGALRM, SIG_ERR, c_int};
use libc::{alarm, write, read, exit};
use apue::signal;
const MAXLINE: usize = 4096;
fn sig_alrm(_: c_int) {
// nothing to do, just return to interrupt the read
}
fn main() | {
unsafe {
let line: [u8; MAXLINE] = std::mem::uninitialized();
if signal(SIGALRM, sig_alrm) == SIG_ERR {
panic!("signal(SIGALRM) error");
}
alarm(1);
let n = read(STDIN_FILENO, as_void!(line), MAXLINE);
if n < 0 {
println!("read error!");
exit(1);
}
alarm(0);
write(STDOUT_FILENO, as_void!(line), n as _);
}
} | identifier_body |
|
f10-read-timeout.rs | /// Figure 10.10: Calling read with a timeout
///
/// Takeaway: First I tried with the regular `signal` function of libc
/// only to find out that the alarm signal does not interrupt the read
/// call. Digging into the C code it got obvious that the signal function
/// gets overriden by `lib/signal.c` which is a "reliable version of signal(),
/// using POSIX sigaction()". But this function gets only introduced in
/// Figure 10.18. This was quite misleading IMO.
///
/// $ f10-read-timeout 2>&1
/// read error!
/// ERROR: return code 1
extern crate libc;
#[macro_use(as_void)]
extern crate apue;
use libc::{STDOUT_FILENO, STDIN_FILENO, SIGALRM, SIG_ERR, c_int};
use libc::{alarm, write, read, exit};
use apue::signal;
const MAXLINE: usize = 4096;
fn sig_alrm(_: c_int) {
// nothing to do, just return to interrupt the read
}
fn | () {
unsafe {
let line: [u8; MAXLINE] = std::mem::uninitialized();
if signal(SIGALRM, sig_alrm) == SIG_ERR {
panic!("signal(SIGALRM) error");
}
alarm(1);
let n = read(STDIN_FILENO, as_void!(line), MAXLINE);
if n < 0 {
println!("read error!");
exit(1);
}
alarm(0);
write(STDOUT_FILENO, as_void!(line), n as _);
}
}
| main | identifier_name |
lookups.rs | /// this is a table lookup for all "flush" hands (e.g. both
/// flushes and straight-flushes. entries containing a zero
/// mean that combination is not possible with a five-card
/// flush hand.
pub const FLUSHES : [u16; 7937] = include!("snip/flushes.snip");
/// this is a table lookup for all non-flush hands consisting
/// of five unique ranks (i.e. either Straights or High Card
/// hands). it's similar to the above "flushes" array.
pub const UNIQUE_5 : [u16; 7937] = include!("snip/unique5.snip");
/// those two arrays are needed for original evaluator version
pub const PRODUCTS : [u32; 4888] = include!("snip/products.snip");
pub const VALUES : [u16; 4888] = include!("snip/values.snip");
/// primes associated with card values
pub const PRIMES: [u8; 13] = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41 ];
/// permutations of 5 cards from 7, to evaluate a hand + table cards with a 5-card algorithm
pub const PERM_7 : [[u8; 5]; 21] = [
[ 0, 1, 2, 3, 4 ],
[ 0, 1, 2, 3, 5 ], | [ 0, 1, 2, 4, 5 ],
[ 0, 1, 2, 4, 6 ],
[ 0, 1, 2, 5, 6 ],
[ 0, 1, 3, 4, 5 ],
[ 0, 1, 3, 4, 6 ],
[ 0, 1, 3, 5, 6 ],
[ 0, 1, 4, 5, 6 ],
[ 0, 2, 3, 4, 5 ],
[ 0, 2, 3, 4, 6 ],
[ 0, 2, 3, 5, 6 ],
[ 0, 2, 4, 5, 6 ],
[ 0, 3, 4, 5, 6 ],
[ 1, 2, 3, 4, 5 ],
[ 1, 2, 3, 4, 6 ],
[ 1, 2, 3, 5, 6 ],
[ 1, 2, 4, 5, 6 ],
[ 1, 3, 4, 5, 6 ],
[ 2, 3, 4, 5, 6 ]
];
/// permutations to evaluate all 6 card combinations.
pub const PERM_6 : [[u8; 5]; 6] = [
[ 0, 1, 2, 3, 4 ],
[ 0, 1, 2, 3, 5 ],
[ 0, 1, 2, 4, 5 ],
[ 0, 1, 3, 4, 5 ],
[ 0, 2, 3, 4, 5 ],
[ 1, 2, 3, 4, 5 ],
];
// perfect hash specific lookups
#[allow(dead_code)]
pub const HASH_VALUES: [u16; 8192] = include!("snip/hash_values.snip");
#[allow(dead_code)]
pub const HASH_ADJUST: [u16; 512] = include!("snip/hash_adjust.snip"); | [ 0, 1, 2, 3, 6 ], | random_line_split |
test.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
pub use crate::dom::bindings::str::{ByteString, DOMString};
pub use crate::dom::headers::normalize_value;
// For compile-fail tests only.
pub use crate::dom::bindings::cell::DomRefCell;
pub use crate::dom::bindings::refcounted::TrustedPromise;
pub use crate::dom::bindings::root::Dom;
pub use crate::dom::node::Node;
pub mod area {
pub use crate::dom::htmlareaelement::{Area, Shape};
}
pub mod size_of {
use crate::dom::characterdata::CharacterData;
use crate::dom::element::Element;
use crate::dom::eventtarget::EventTarget;
use crate::dom::htmldivelement::HTMLDivElement;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmlspanelement::HTMLSpanElement;
use crate::dom::node::Node;
use crate::dom::text::Text;
use std::mem::size_of;
pub fn CharacterData() -> usize {
size_of::<CharacterData>()
}
pub fn Element() -> usize {
size_of::<Element>()
}
pub fn EventTarget() -> usize {
size_of::<EventTarget>()
}
pub fn HTMLDivElement() -> usize {
size_of::<HTMLDivElement>()
}
pub fn HTMLElement() -> usize {
size_of::<HTMLElement>()
}
| }
pub fn Node() -> usize {
size_of::<Node>()
}
pub fn Text() -> usize {
size_of::<Text>()
}
}
pub mod srcset {
pub use crate::dom::htmlimageelement::{parse_a_srcset_attribute, Descriptor, ImageSource};
}
pub mod timeranges {
pub use crate::dom::timeranges::TimeRangesContainer;
} | pub fn HTMLSpanElement() -> usize {
size_of::<HTMLSpanElement>() | random_line_split |
test.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
pub use crate::dom::bindings::str::{ByteString, DOMString};
pub use crate::dom::headers::normalize_value;
// For compile-fail tests only.
pub use crate::dom::bindings::cell::DomRefCell;
pub use crate::dom::bindings::refcounted::TrustedPromise;
pub use crate::dom::bindings::root::Dom;
pub use crate::dom::node::Node;
pub mod area {
pub use crate::dom::htmlareaelement::{Area, Shape};
}
pub mod size_of {
use crate::dom::characterdata::CharacterData;
use crate::dom::element::Element;
use crate::dom::eventtarget::EventTarget;
use crate::dom::htmldivelement::HTMLDivElement;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmlspanelement::HTMLSpanElement;
use crate::dom::node::Node;
use crate::dom::text::Text;
use std::mem::size_of;
pub fn CharacterData() -> usize {
size_of::<CharacterData>()
}
pub fn Element() -> usize {
size_of::<Element>()
}
pub fn EventTarget() -> usize {
size_of::<EventTarget>()
}
pub fn HTMLDivElement() -> usize {
size_of::<HTMLDivElement>()
}
pub fn HTMLElement() -> usize {
size_of::<HTMLElement>()
}
pub fn | () -> usize {
size_of::<HTMLSpanElement>()
}
pub fn Node() -> usize {
size_of::<Node>()
}
pub fn Text() -> usize {
size_of::<Text>()
}
}
pub mod srcset {
pub use crate::dom::htmlimageelement::{parse_a_srcset_attribute, Descriptor, ImageSource};
}
pub mod timeranges {
pub use crate::dom::timeranges::TimeRangesContainer;
}
| HTMLSpanElement | identifier_name |
test.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
pub use crate::dom::bindings::str::{ByteString, DOMString};
pub use crate::dom::headers::normalize_value;
// For compile-fail tests only.
pub use crate::dom::bindings::cell::DomRefCell;
pub use crate::dom::bindings::refcounted::TrustedPromise;
pub use crate::dom::bindings::root::Dom;
pub use crate::dom::node::Node;
pub mod area {
pub use crate::dom::htmlareaelement::{Area, Shape};
}
pub mod size_of {
use crate::dom::characterdata::CharacterData;
use crate::dom::element::Element;
use crate::dom::eventtarget::EventTarget;
use crate::dom::htmldivelement::HTMLDivElement;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmlspanelement::HTMLSpanElement;
use crate::dom::node::Node;
use crate::dom::text::Text;
use std::mem::size_of;
pub fn CharacterData() -> usize {
size_of::<CharacterData>()
}
pub fn Element() -> usize {
size_of::<Element>()
}
pub fn EventTarget() -> usize {
size_of::<EventTarget>()
}
pub fn HTMLDivElement() -> usize {
size_of::<HTMLDivElement>()
}
pub fn HTMLElement() -> usize {
size_of::<HTMLElement>()
}
pub fn HTMLSpanElement() -> usize {
size_of::<HTMLSpanElement>()
}
pub fn Node() -> usize {
size_of::<Node>()
}
pub fn Text() -> usize |
}
pub mod srcset {
pub use crate::dom::htmlimageelement::{parse_a_srcset_attribute, Descriptor, ImageSource};
}
pub mod timeranges {
pub use crate::dom::timeranges::TimeRangesContainer;
}
| {
size_of::<Text>()
} | identifier_body |
queue_alt.rs | /*!
Heterogeneous Queue (alternative)
This version is hand-written (no macros) but has a simpler architecture
that allows implicit consumption by deconstruction on assignment.
# Example
```rust
use heterogene::queue_alt::{Q0,Q1,Q2};
let q = ();
let q = q.append(1u);
let q = q.append('c');
let (num, q) = q;
let (ch, q) = q;
println!("Queue-alt: {} {} {}", num, ch, q);
```
*/
pub trait Q0 {
fn append<T1>(self, t1: T1) -> (T1,());
}
impl Q0 for () {
fn append<T1>(self, t1: T1) -> (T1,()) {
(t1,())
}
}
pub trait Q1<T1> {
fn append<T2>(self, t2: T2) -> (T1,(T2,()));
}
impl<T1> Q1<T1> for (T1,()) { | (t1,(t2,()))
}
}
pub trait Q2<T1,T2> {
fn append<T3>(self, t3: T3) -> (T1,(T2,(T3,())));
}
impl<T1,T2> Q2<T1,T2> for (T1,(T2,())) {
fn append<T3>(self, t3: T3) -> (T1,(T2,(T3,()))) {
let(t1,(t2,_)) = self;
(t1,(t2,(t3,())))
}
} | fn append<T2>(self, t2: T2) -> (T1,(T2,())) {
let (t1,_) = self; | random_line_split |
queue_alt.rs | /*!
Heterogeneous Queue (alternative)
This version is hand-written (no macros) but has a simpler architecture
that allows implicit consumption by deconstruction on assignment.
# Example
```rust
use heterogene::queue_alt::{Q0,Q1,Q2};
let q = ();
let q = q.append(1u);
let q = q.append('c');
let (num, q) = q;
let (ch, q) = q;
println!("Queue-alt: {} {} {}", num, ch, q);
```
*/
pub trait Q0 {
fn append<T1>(self, t1: T1) -> (T1,());
}
impl Q0 for () {
fn append<T1>(self, t1: T1) -> (T1,()) {
(t1,())
}
}
pub trait Q1<T1> {
fn append<T2>(self, t2: T2) -> (T1,(T2,()));
}
impl<T1> Q1<T1> for (T1,()) {
fn | <T2>(self, t2: T2) -> (T1,(T2,())) {
let (t1,_) = self;
(t1,(t2,()))
}
}
pub trait Q2<T1,T2> {
fn append<T3>(self, t3: T3) -> (T1,(T2,(T3,())));
}
impl<T1,T2> Q2<T1,T2> for (T1,(T2,())) {
fn append<T3>(self, t3: T3) -> (T1,(T2,(T3,()))) {
let(t1,(t2,_)) = self;
(t1,(t2,(t3,())))
}
}
| append | identifier_name |
queue_alt.rs | /*!
Heterogeneous Queue (alternative)
This version is hand-written (no macros) but has a simpler architecture
that allows implicit consumption by deconstruction on assignment.
# Example
```rust
use heterogene::queue_alt::{Q0,Q1,Q2};
let q = ();
let q = q.append(1u);
let q = q.append('c');
let (num, q) = q;
let (ch, q) = q;
println!("Queue-alt: {} {} {}", num, ch, q);
```
*/
pub trait Q0 {
fn append<T1>(self, t1: T1) -> (T1,());
}
impl Q0 for () {
fn append<T1>(self, t1: T1) -> (T1,()) {
(t1,())
}
}
pub trait Q1<T1> {
fn append<T2>(self, t2: T2) -> (T1,(T2,()));
}
impl<T1> Q1<T1> for (T1,()) {
fn append<T2>(self, t2: T2) -> (T1,(T2,())) |
}
pub trait Q2<T1,T2> {
fn append<T3>(self, t3: T3) -> (T1,(T2,(T3,())));
}
impl<T1,T2> Q2<T1,T2> for (T1,(T2,())) {
fn append<T3>(self, t3: T3) -> (T1,(T2,(T3,()))) {
let(t1,(t2,_)) = self;
(t1,(t2,(t3,())))
}
}
| {
let (t1,_) = self;
(t1,(t2,()))
} | identifier_body |
transaction_map.rs | use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied,Vacant};
use primitive::{UInt256,Transaction};
#[derive(PartialEq)]
pub enum TransactionIndexStatus {
Init = 0,
Get = 1,
}
pub struct TransactionIndex {
status: TransactionIndexStatus,
hash: UInt256,
transaction: Option<Transaction>,
waiters: Vec<UInt256>,
}
impl TransactionIndex {
pub fn new(hash: &UInt256) -> TransactionIndex {
TransactionIndex {
status: TransactionIndexStatus::Init,
hash: hash.clone(),
transaction: None,
waiters: Vec::new(),
}
}
pub fn is_init(&self) -> bool { self.status == TransactionIndexStatus::Init } | pub fn get_hash(&self) -> &UInt256 { &self.hash }
pub fn get_transaction(&self) -> &Option<Transaction> { &self.transaction }
pub fn set_transaction(&mut self, transaction: Transaction) {
self.transaction = Some(transaction);
self.status = TransactionIndexStatus::Get;
}
pub fn add_waiter(&mut self, next: UInt256) {
self.waiters.push(next);
}
pub fn move_waiters(&mut self, v:&mut Vec<UInt256>) {
v.append(&mut self.waiters);
}
}
#[derive(Default)]
pub struct TransactionMap {
map: HashMap< UInt256, TransactionIndex >,
}
impl TransactionMap {
pub fn get(&self, hash: &UInt256) -> Option<&TransactionIndex> {
self.map.get(hash)
}
pub fn get_mut(&mut self, hash: &UInt256) -> Option<&mut TransactionIndex> {
self.map.get_mut(hash)
}
pub fn insert(&mut self, hash: &UInt256) -> Result<&mut TransactionIndex, &mut TransactionIndex> {
match self.map.entry(hash.clone()) {
Vacant(v) => Ok(v.insert(TransactionIndex::new(hash))),
Occupied(o) => Err(o.into_mut())
}
}
} | random_line_split |
|
transaction_map.rs | use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied,Vacant};
use primitive::{UInt256,Transaction};
#[derive(PartialEq)]
pub enum TransactionIndexStatus {
Init = 0,
Get = 1,
}
pub struct TransactionIndex {
status: TransactionIndexStatus,
hash: UInt256,
transaction: Option<Transaction>,
waiters: Vec<UInt256>,
}
impl TransactionIndex {
pub fn new(hash: &UInt256) -> TransactionIndex {
TransactionIndex {
status: TransactionIndexStatus::Init,
hash: hash.clone(),
transaction: None,
waiters: Vec::new(),
}
}
pub fn is_init(&self) -> bool { self.status == TransactionIndexStatus::Init }
pub fn get_hash(&self) -> &UInt256 { &self.hash }
pub fn get_transaction(&self) -> &Option<Transaction> { &self.transaction }
pub fn set_transaction(&mut self, transaction: Transaction) {
self.transaction = Some(transaction);
self.status = TransactionIndexStatus::Get;
}
pub fn add_waiter(&mut self, next: UInt256) {
self.waiters.push(next);
}
pub fn move_waiters(&mut self, v:&mut Vec<UInt256>) {
v.append(&mut self.waiters);
}
}
#[derive(Default)]
pub struct TransactionMap {
map: HashMap< UInt256, TransactionIndex >,
}
impl TransactionMap {
pub fn | (&self, hash: &UInt256) -> Option<&TransactionIndex> {
self.map.get(hash)
}
pub fn get_mut(&mut self, hash: &UInt256) -> Option<&mut TransactionIndex> {
self.map.get_mut(hash)
}
pub fn insert(&mut self, hash: &UInt256) -> Result<&mut TransactionIndex, &mut TransactionIndex> {
match self.map.entry(hash.clone()) {
Vacant(v) => Ok(v.insert(TransactionIndex::new(hash))),
Occupied(o) => Err(o.into_mut())
}
}
}
| get | identifier_name |
simd_add.rs | #![feature(test)]
#![feature(core)]
use std::simd::f32x4;
macro_rules! assert_equal_len {
($a:ident, $b: ident) => {
assert!($a.len() == $b.len(),
"add_assign: dimension mismatch: {:?} += {:?}",
($a.len(),),
($b.len(),));
}
}
// element-wise addition
fn add_assign(xs: &mut Vec<f32>, ys: &Vec<f32>) {
assert_equal_len!(xs, ys);
for (x, y) in xs.iter_mut().zip(ys.iter()) {
*x += *y;
}
}
// simd accelerated addition
fn | (xs: &mut Vec<f32>, ys: &Vec<f32>) {
assert_equal_len!(xs, ys);
let size = xs.len() as isize;
let chunks = size / 4;
// pointer to the start of the vector data
let p_x: *mut f32 = xs.as_mut_ptr();
let p_y: *const f32 = ys.as_ptr();
// sum excess elements that don't fit in the simd vector
for i in (4 * chunks)..size {
// dereferencing a raw pointer requires an unsafe block
unsafe {
// offset by i elements
*p_x.offset(i) += *p_y.offset(i);
}
}
// treat f32 vector as an simd f32x4 vector
let simd_p_x = p_x as *mut f32x4;
let simd_p_y = p_y as *const f32x4;
// sum "simd vector"
for i in 0..chunks {
unsafe {
*simd_p_x.offset(i) += *simd_p_y.offset(i);
}
}
}
mod bench {
extern crate test;
use self::test::Bencher;
use std::iter;
static BENCH_SIZE: usize = 10_000;
macro_rules! bench {
($name:ident, $func:ident) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut x: Vec<_> = iter::repeat(1.0f32)
.take(BENCH_SIZE)
.collect();
let y: Vec<_> = iter::repeat(1.0f32)
.take(BENCH_SIZE)
.collect();
b.iter(|| {
super::$func(&mut x, &y);
})
}
}
}
bench!(vanilla, add_assign);
bench!(simd, simd_add_assign);
}
| simd_add_assign | identifier_name |
simd_add.rs | #![feature(test)]
#![feature(core)]
use std::simd::f32x4;
macro_rules! assert_equal_len {
($a:ident, $b: ident) => {
assert!($a.len() == $b.len(),
"add_assign: dimension mismatch: {:?} += {:?}",
($a.len(),),
($b.len(),));
}
}
// element-wise addition
fn add_assign(xs: &mut Vec<f32>, ys: &Vec<f32>) |
// simd accelerated addition
fn simd_add_assign(xs: &mut Vec<f32>, ys: &Vec<f32>) {
assert_equal_len!(xs, ys);
let size = xs.len() as isize;
let chunks = size / 4;
// pointer to the start of the vector data
let p_x: *mut f32 = xs.as_mut_ptr();
let p_y: *const f32 = ys.as_ptr();
// sum excess elements that don't fit in the simd vector
for i in (4 * chunks)..size {
// dereferencing a raw pointer requires an unsafe block
unsafe {
// offset by i elements
*p_x.offset(i) += *p_y.offset(i);
}
}
// treat f32 vector as an simd f32x4 vector
let simd_p_x = p_x as *mut f32x4;
let simd_p_y = p_y as *const f32x4;
// sum "simd vector"
for i in 0..chunks {
unsafe {
*simd_p_x.offset(i) += *simd_p_y.offset(i);
}
}
}
mod bench {
extern crate test;
use self::test::Bencher;
use std::iter;
static BENCH_SIZE: usize = 10_000;
macro_rules! bench {
($name:ident, $func:ident) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut x: Vec<_> = iter::repeat(1.0f32)
.take(BENCH_SIZE)
.collect();
let y: Vec<_> = iter::repeat(1.0f32)
.take(BENCH_SIZE)
.collect();
b.iter(|| {
super::$func(&mut x, &y);
})
}
}
}
bench!(vanilla, add_assign);
bench!(simd, simd_add_assign);
}
| {
assert_equal_len!(xs, ys);
for (x, y) in xs.iter_mut().zip(ys.iter()) {
*x += *y;
}
} | identifier_body |
simd_add.rs | #![feature(test)]
#![feature(core)]
use std::simd::f32x4;
macro_rules! assert_equal_len {
($a:ident, $b: ident) => {
assert!($a.len() == $b.len(),
"add_assign: dimension mismatch: {:?} += {:?}",
($a.len(),),
($b.len(),));
}
}
// element-wise addition
fn add_assign(xs: &mut Vec<f32>, ys: &Vec<f32>) {
assert_equal_len!(xs, ys);
for (x, y) in xs.iter_mut().zip(ys.iter()) {
*x += *y;
}
}
// simd accelerated addition
fn simd_add_assign(xs: &mut Vec<f32>, ys: &Vec<f32>) {
assert_equal_len!(xs, ys);
let size = xs.len() as isize;
let chunks = size / 4;
// pointer to the start of the vector data
let p_x: *mut f32 = xs.as_mut_ptr();
let p_y: *const f32 = ys.as_ptr();
// sum excess elements that don't fit in the simd vector
for i in (4 * chunks)..size {
// dereferencing a raw pointer requires an unsafe block
unsafe {
// offset by i elements
*p_x.offset(i) += *p_y.offset(i);
}
}
// treat f32 vector as an simd f32x4 vector
let simd_p_x = p_x as *mut f32x4;
let simd_p_y = p_y as *const f32x4;
| }
}
}
mod bench {
extern crate test;
use self::test::Bencher;
use std::iter;
static BENCH_SIZE: usize = 10_000;
macro_rules! bench {
($name:ident, $func:ident) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut x: Vec<_> = iter::repeat(1.0f32)
.take(BENCH_SIZE)
.collect();
let y: Vec<_> = iter::repeat(1.0f32)
.take(BENCH_SIZE)
.collect();
b.iter(|| {
super::$func(&mut x, &y);
})
}
}
}
bench!(vanilla, add_assign);
bench!(simd, simd_add_assign);
} | // sum "simd vector"
for i in 0..chunks {
unsafe {
*simd_p_x.offset(i) += *simd_p_y.offset(i); | random_line_split |
context.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Context data structure used by rustpkg
use std::{io, os};
use extra::workcache;
use rustc::driver::session::{OptLevel, No};
#[deriving(Clone)]
pub struct Context {
// Config strings that the user passed in with --cfg
cfgs: ~[~str],
// Flags to pass to rustc
rustc_flags: RustcFlags,
// If use_rust_path_hack is true, rustpkg searches for sources
// in *package* directories that are in the RUST_PATH (for example,
// FOO/src/bar-0.1 instead of FOO). The flag doesn't affect where
// rustpkg stores build artifacts.
use_rust_path_hack: bool,
// The root directory containing the Rust standard libraries
sysroot: Path
}
#[deriving(Clone)]
pub struct BuildContext {
// Context for workcache
workcache_context: workcache::Context,
// Everything else
context: Context
}
impl BuildContext {
pub fn sysroot(&self) -> Path {
self.context.sysroot.clone()
}
pub fn sysroot_to_use(&self) -> Path {
self.context.sysroot_to_use()
}
/// Returns the flags to pass to rustc, as a vector of strings
pub fn flag_strs(&self) -> ~[~str] {
self.context.flag_strs()
}
pub fn compile_upto(&self) -> StopBefore {
self.context.compile_upto()
}
}
/*
Deliberately unsupported rustc flags:
--bin, --lib inferred from crate file names
-L inferred from extern mods
--out-dir inferred from RUST_PATH
--test use `rustpkg test`
-v -h --ls don't make sense with rustpkg
-W -A -D -F - use pragmas instead
rustc flags that aren't implemented yet:
--passes
--llvm-arg
--target-feature
--android-cross-path
*/
pub struct RustcFlags {
compile_upto: StopBefore,
// Linker to use with the --linker flag
linker: Option<~str>,
// Extra arguments to pass to rustc with the --link-args flag
link_args: Option<~str>,
// Optimization level. 0 = default. -O = 2.
optimization_level: OptLevel,
// True if the user passed in --save-temps
save_temps: bool,
// Target (defaults to rustc's default target)
target: Option<~str>,
// Target CPU (defaults to rustc's default target CPU)
target_cpu: Option<~str>,
// Any -Z features
experimental_features: Option<~[~str]>
}
impl Clone for RustcFlags {
fn clone(&self) -> RustcFlags {
RustcFlags {
compile_upto: self.compile_upto,
linker: self.linker.clone(),
link_args: self.link_args.clone(),
optimization_level: self.optimization_level,
save_temps: self.save_temps,
target: self.target.clone(),
target_cpu: self.target_cpu.clone(),
experimental_features: self.experimental_features.clone()
}
}
}
#[deriving(Eq)]
pub enum StopBefore {
Nothing, // compile everything
Link, // --no-link
LLVMCompileBitcode, // --emit-llvm without -S
LLVMAssemble, // -S --emit-llvm
Assemble, // -S without --emit-llvm
Trans, // --no-trans
Pretty, // --pretty
Analysis, // --parse-only
}
impl Context {
pub fn sysroot(&self) -> Path {
self.sysroot.clone()
}
/// Debugging
pub fn sysroot_str(&self) -> ~str {
self.sysroot.as_str().unwrap().to_owned()
}
// Hack so that rustpkg can run either out of a rustc target dir,
// or the host dir
pub fn sysroot_to_use(&self) -> Path {
if!in_target(&self.sysroot) {
self.sysroot.clone()
} else {
let mut p = self.sysroot.clone();
p.pop();
p.pop();
p.pop();
p
}
}
/// Returns the flags to pass to rustc, as a vector of strings
pub fn flag_strs(&self) -> ~[~str] {
self.rustc_flags.flag_strs()
}
pub fn compile_upto(&self) -> StopBefore {
self.rustc_flags.compile_upto
}
}
/// We assume that if../../rustc exists, then we're running
/// rustpkg from a Rust target directory. This is part of a
/// kludgy hack used to adjust the sysroot.
pub fn in_target(sysroot: &Path) -> bool |
impl RustcFlags {
fn flag_strs(&self) -> ~[~str] {
let linker_flag = match self.linker {
Some(ref l) => ~[~"--linker", l.clone()],
None => ~[]
};
let link_args_flag = match self.link_args {
Some(ref l) => ~[~"--link-args", l.clone()],
None => ~[]
};
let save_temps_flag = if self.save_temps { ~[~"--save-temps"] } else { ~[] };
let target_flag = match self.target {
Some(ref l) => ~[~"--target", l.clone()],
None => ~[]
};
let target_cpu_flag = match self.target_cpu {
Some(ref l) => ~[~"--target-cpu", l.clone()],
None => ~[]
};
let z_flags = match self.experimental_features {
Some(ref ls) => ls.flat_map(|s| ~[~"-Z", s.clone()]),
None => ~[]
};
linker_flag
+ link_args_flag
+ save_temps_flag
+ target_flag
+ target_cpu_flag
+ z_flags + (match self.compile_upto {
LLVMCompileBitcode => ~[~"--emit-llvm"],
LLVMAssemble => ~[~"--emit-llvm", ~"-S"],
Link => ~[~"-c"],
Trans => ~[~"--no-trans"],
Assemble => ~[~"-S"],
// n.b. Doesn't support all flavors of --pretty (yet)
Pretty => ~[~"--pretty"],
Analysis => ~[~"--parse-only"],
Nothing => ~[]
})
}
pub fn default() -> RustcFlags {
RustcFlags {
linker: None,
link_args: None,
compile_upto: Nothing,
optimization_level: No,
save_temps: false,
target: None,
target_cpu: None,
experimental_features: None
}
}
}
/// Returns true if any of the flags given are incompatible with the cmd
pub fn flags_forbidden_for_cmd(flags: &RustcFlags,
cfgs: &[~str],
cmd: &str, user_supplied_opt_level: bool) -> bool {
let complain = |s| {
println!("The {} option can only be used with the `build` command:
rustpkg [options..] build {} [package-ID]", s, s);
};
if flags.linker.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --linker option can only be used with the build or install commands.");
return true;
}
if flags.link_args.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --link-args option can only be used with the build or install commands.");
return true;
}
if!cfgs.is_empty() && cmd!= "build" && cmd!= "install" {
io::println("The --cfg option can only be used with the build or install commands.");
return true;
}
if user_supplied_opt_level && cmd!= "build" && cmd!= "install" {
io::println("The -O and --opt-level options can only be used with the build \
or install commands.");
return true;
}
if flags.save_temps && cmd!= "build" && cmd!= "install" {
io::println("The --save-temps option can only be used with the build \
or install commands.");
return true;
}
if flags.target.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --target option can only be used with the build \
or install commands.");
return true;
}
if flags.target_cpu.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --target-cpu option can only be used with the build \
or install commands.");
return true;
}
if flags.experimental_features.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The -Z option can only be used with the build or install commands.");
return true;
}
match flags.compile_upto {
Link if cmd!= "build" => {
complain("--no-link");
true
}
Trans if cmd!= "build" => {
complain("--no-trans");
true
}
Assemble if cmd!= "build" => {
complain("-S");
true
}
Pretty if cmd!= "build" => {
complain("--pretty");
true
}
Analysis if cmd!= "build" => {
complain("--parse-only");
true
}
LLVMCompileBitcode if cmd!= "build" => {
complain("--emit-llvm");
true
}
LLVMAssemble if cmd!= "build" => {
complain("--emit-llvm");
true
}
_ => false
}
}
| {
debug2!("Checking whether {} is in target", sysroot.display());
let mut p = sysroot.dir_path();
p.set_filename("rustc");
os::path_is_dir(&p)
} | identifier_body |
context.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Context data structure used by rustpkg
use std::{io, os};
use extra::workcache;
use rustc::driver::session::{OptLevel, No};
#[deriving(Clone)]
pub struct | {
// Config strings that the user passed in with --cfg
cfgs: ~[~str],
// Flags to pass to rustc
rustc_flags: RustcFlags,
// If use_rust_path_hack is true, rustpkg searches for sources
// in *package* directories that are in the RUST_PATH (for example,
// FOO/src/bar-0.1 instead of FOO). The flag doesn't affect where
// rustpkg stores build artifacts.
use_rust_path_hack: bool,
// The root directory containing the Rust standard libraries
sysroot: Path
}
#[deriving(Clone)]
pub struct BuildContext {
// Context for workcache
workcache_context: workcache::Context,
// Everything else
context: Context
}
impl BuildContext {
pub fn sysroot(&self) -> Path {
self.context.sysroot.clone()
}
pub fn sysroot_to_use(&self) -> Path {
self.context.sysroot_to_use()
}
/// Returns the flags to pass to rustc, as a vector of strings
pub fn flag_strs(&self) -> ~[~str] {
self.context.flag_strs()
}
pub fn compile_upto(&self) -> StopBefore {
self.context.compile_upto()
}
}
/*
Deliberately unsupported rustc flags:
--bin, --lib inferred from crate file names
-L inferred from extern mods
--out-dir inferred from RUST_PATH
--test use `rustpkg test`
-v -h --ls don't make sense with rustpkg
-W -A -D -F - use pragmas instead
rustc flags that aren't implemented yet:
--passes
--llvm-arg
--target-feature
--android-cross-path
*/
pub struct RustcFlags {
compile_upto: StopBefore,
// Linker to use with the --linker flag
linker: Option<~str>,
// Extra arguments to pass to rustc with the --link-args flag
link_args: Option<~str>,
// Optimization level. 0 = default. -O = 2.
optimization_level: OptLevel,
// True if the user passed in --save-temps
save_temps: bool,
// Target (defaults to rustc's default target)
target: Option<~str>,
// Target CPU (defaults to rustc's default target CPU)
target_cpu: Option<~str>,
// Any -Z features
experimental_features: Option<~[~str]>
}
impl Clone for RustcFlags {
fn clone(&self) -> RustcFlags {
RustcFlags {
compile_upto: self.compile_upto,
linker: self.linker.clone(),
link_args: self.link_args.clone(),
optimization_level: self.optimization_level,
save_temps: self.save_temps,
target: self.target.clone(),
target_cpu: self.target_cpu.clone(),
experimental_features: self.experimental_features.clone()
}
}
}
#[deriving(Eq)]
pub enum StopBefore {
Nothing, // compile everything
Link, // --no-link
LLVMCompileBitcode, // --emit-llvm without -S
LLVMAssemble, // -S --emit-llvm
Assemble, // -S without --emit-llvm
Trans, // --no-trans
Pretty, // --pretty
Analysis, // --parse-only
}
impl Context {
pub fn sysroot(&self) -> Path {
self.sysroot.clone()
}
/// Debugging
pub fn sysroot_str(&self) -> ~str {
self.sysroot.as_str().unwrap().to_owned()
}
// Hack so that rustpkg can run either out of a rustc target dir,
// or the host dir
pub fn sysroot_to_use(&self) -> Path {
if!in_target(&self.sysroot) {
self.sysroot.clone()
} else {
let mut p = self.sysroot.clone();
p.pop();
p.pop();
p.pop();
p
}
}
/// Returns the flags to pass to rustc, as a vector of strings
pub fn flag_strs(&self) -> ~[~str] {
self.rustc_flags.flag_strs()
}
pub fn compile_upto(&self) -> StopBefore {
self.rustc_flags.compile_upto
}
}
/// We assume that if../../rustc exists, then we're running
/// rustpkg from a Rust target directory. This is part of a
/// kludgy hack used to adjust the sysroot.
pub fn in_target(sysroot: &Path) -> bool {
debug2!("Checking whether {} is in target", sysroot.display());
let mut p = sysroot.dir_path();
p.set_filename("rustc");
os::path_is_dir(&p)
}
impl RustcFlags {
fn flag_strs(&self) -> ~[~str] {
let linker_flag = match self.linker {
Some(ref l) => ~[~"--linker", l.clone()],
None => ~[]
};
let link_args_flag = match self.link_args {
Some(ref l) => ~[~"--link-args", l.clone()],
None => ~[]
};
let save_temps_flag = if self.save_temps { ~[~"--save-temps"] } else { ~[] };
let target_flag = match self.target {
Some(ref l) => ~[~"--target", l.clone()],
None => ~[]
};
let target_cpu_flag = match self.target_cpu {
Some(ref l) => ~[~"--target-cpu", l.clone()],
None => ~[]
};
let z_flags = match self.experimental_features {
Some(ref ls) => ls.flat_map(|s| ~[~"-Z", s.clone()]),
None => ~[]
};
linker_flag
+ link_args_flag
+ save_temps_flag
+ target_flag
+ target_cpu_flag
+ z_flags + (match self.compile_upto {
LLVMCompileBitcode => ~[~"--emit-llvm"],
LLVMAssemble => ~[~"--emit-llvm", ~"-S"],
Link => ~[~"-c"],
Trans => ~[~"--no-trans"],
Assemble => ~[~"-S"],
// n.b. Doesn't support all flavors of --pretty (yet)
Pretty => ~[~"--pretty"],
Analysis => ~[~"--parse-only"],
Nothing => ~[]
})
}
pub fn default() -> RustcFlags {
RustcFlags {
linker: None,
link_args: None,
compile_upto: Nothing,
optimization_level: No,
save_temps: false,
target: None,
target_cpu: None,
experimental_features: None
}
}
}
/// Returns true if any of the flags given are incompatible with the cmd
pub fn flags_forbidden_for_cmd(flags: &RustcFlags,
cfgs: &[~str],
cmd: &str, user_supplied_opt_level: bool) -> bool {
let complain = |s| {
println!("The {} option can only be used with the `build` command:
rustpkg [options..] build {} [package-ID]", s, s);
};
if flags.linker.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --linker option can only be used with the build or install commands.");
return true;
}
if flags.link_args.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --link-args option can only be used with the build or install commands.");
return true;
}
if!cfgs.is_empty() && cmd!= "build" && cmd!= "install" {
io::println("The --cfg option can only be used with the build or install commands.");
return true;
}
if user_supplied_opt_level && cmd!= "build" && cmd!= "install" {
io::println("The -O and --opt-level options can only be used with the build \
or install commands.");
return true;
}
if flags.save_temps && cmd!= "build" && cmd!= "install" {
io::println("The --save-temps option can only be used with the build \
or install commands.");
return true;
}
if flags.target.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --target option can only be used with the build \
or install commands.");
return true;
}
if flags.target_cpu.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --target-cpu option can only be used with the build \
or install commands.");
return true;
}
if flags.experimental_features.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The -Z option can only be used with the build or install commands.");
return true;
}
match flags.compile_upto {
Link if cmd!= "build" => {
complain("--no-link");
true
}
Trans if cmd!= "build" => {
complain("--no-trans");
true
}
Assemble if cmd!= "build" => {
complain("-S");
true
}
Pretty if cmd!= "build" => {
complain("--pretty");
true
}
Analysis if cmd!= "build" => {
complain("--parse-only");
true
}
LLVMCompileBitcode if cmd!= "build" => {
complain("--emit-llvm");
true
}
LLVMAssemble if cmd!= "build" => {
complain("--emit-llvm");
true
}
_ => false
}
}
| Context | identifier_name |
context.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Context data structure used by rustpkg
use std::{io, os}; |
#[deriving(Clone)]
pub struct Context {
// Config strings that the user passed in with --cfg
cfgs: ~[~str],
// Flags to pass to rustc
rustc_flags: RustcFlags,
// If use_rust_path_hack is true, rustpkg searches for sources
// in *package* directories that are in the RUST_PATH (for example,
// FOO/src/bar-0.1 instead of FOO). The flag doesn't affect where
// rustpkg stores build artifacts.
use_rust_path_hack: bool,
// The root directory containing the Rust standard libraries
sysroot: Path
}
#[deriving(Clone)]
pub struct BuildContext {
// Context for workcache
workcache_context: workcache::Context,
// Everything else
context: Context
}
impl BuildContext {
pub fn sysroot(&self) -> Path {
self.context.sysroot.clone()
}
pub fn sysroot_to_use(&self) -> Path {
self.context.sysroot_to_use()
}
/// Returns the flags to pass to rustc, as a vector of strings
pub fn flag_strs(&self) -> ~[~str] {
self.context.flag_strs()
}
pub fn compile_upto(&self) -> StopBefore {
self.context.compile_upto()
}
}
/*
Deliberately unsupported rustc flags:
--bin, --lib inferred from crate file names
-L inferred from extern mods
--out-dir inferred from RUST_PATH
--test use `rustpkg test`
-v -h --ls don't make sense with rustpkg
-W -A -D -F - use pragmas instead
rustc flags that aren't implemented yet:
--passes
--llvm-arg
--target-feature
--android-cross-path
*/
pub struct RustcFlags {
compile_upto: StopBefore,
// Linker to use with the --linker flag
linker: Option<~str>,
// Extra arguments to pass to rustc with the --link-args flag
link_args: Option<~str>,
// Optimization level. 0 = default. -O = 2.
optimization_level: OptLevel,
// True if the user passed in --save-temps
save_temps: bool,
// Target (defaults to rustc's default target)
target: Option<~str>,
// Target CPU (defaults to rustc's default target CPU)
target_cpu: Option<~str>,
// Any -Z features
experimental_features: Option<~[~str]>
}
impl Clone for RustcFlags {
fn clone(&self) -> RustcFlags {
RustcFlags {
compile_upto: self.compile_upto,
linker: self.linker.clone(),
link_args: self.link_args.clone(),
optimization_level: self.optimization_level,
save_temps: self.save_temps,
target: self.target.clone(),
target_cpu: self.target_cpu.clone(),
experimental_features: self.experimental_features.clone()
}
}
}
#[deriving(Eq)]
pub enum StopBefore {
Nothing, // compile everything
Link, // --no-link
LLVMCompileBitcode, // --emit-llvm without -S
LLVMAssemble, // -S --emit-llvm
Assemble, // -S without --emit-llvm
Trans, // --no-trans
Pretty, // --pretty
Analysis, // --parse-only
}
impl Context {
pub fn sysroot(&self) -> Path {
self.sysroot.clone()
}
/// Debugging
pub fn sysroot_str(&self) -> ~str {
self.sysroot.as_str().unwrap().to_owned()
}
// Hack so that rustpkg can run either out of a rustc target dir,
// or the host dir
pub fn sysroot_to_use(&self) -> Path {
if!in_target(&self.sysroot) {
self.sysroot.clone()
} else {
let mut p = self.sysroot.clone();
p.pop();
p.pop();
p.pop();
p
}
}
/// Returns the flags to pass to rustc, as a vector of strings
pub fn flag_strs(&self) -> ~[~str] {
self.rustc_flags.flag_strs()
}
pub fn compile_upto(&self) -> StopBefore {
self.rustc_flags.compile_upto
}
}
/// We assume that if../../rustc exists, then we're running
/// rustpkg from a Rust target directory. This is part of a
/// kludgy hack used to adjust the sysroot.
pub fn in_target(sysroot: &Path) -> bool {
debug2!("Checking whether {} is in target", sysroot.display());
let mut p = sysroot.dir_path();
p.set_filename("rustc");
os::path_is_dir(&p)
}
impl RustcFlags {
fn flag_strs(&self) -> ~[~str] {
let linker_flag = match self.linker {
Some(ref l) => ~[~"--linker", l.clone()],
None => ~[]
};
let link_args_flag = match self.link_args {
Some(ref l) => ~[~"--link-args", l.clone()],
None => ~[]
};
let save_temps_flag = if self.save_temps { ~[~"--save-temps"] } else { ~[] };
let target_flag = match self.target {
Some(ref l) => ~[~"--target", l.clone()],
None => ~[]
};
let target_cpu_flag = match self.target_cpu {
Some(ref l) => ~[~"--target-cpu", l.clone()],
None => ~[]
};
let z_flags = match self.experimental_features {
Some(ref ls) => ls.flat_map(|s| ~[~"-Z", s.clone()]),
None => ~[]
};
linker_flag
+ link_args_flag
+ save_temps_flag
+ target_flag
+ target_cpu_flag
+ z_flags + (match self.compile_upto {
LLVMCompileBitcode => ~[~"--emit-llvm"],
LLVMAssemble => ~[~"--emit-llvm", ~"-S"],
Link => ~[~"-c"],
Trans => ~[~"--no-trans"],
Assemble => ~[~"-S"],
// n.b. Doesn't support all flavors of --pretty (yet)
Pretty => ~[~"--pretty"],
Analysis => ~[~"--parse-only"],
Nothing => ~[]
})
}
pub fn default() -> RustcFlags {
RustcFlags {
linker: None,
link_args: None,
compile_upto: Nothing,
optimization_level: No,
save_temps: false,
target: None,
target_cpu: None,
experimental_features: None
}
}
}
/// Returns true if any of the flags given are incompatible with the cmd
pub fn flags_forbidden_for_cmd(flags: &RustcFlags,
cfgs: &[~str],
cmd: &str, user_supplied_opt_level: bool) -> bool {
let complain = |s| {
println!("The {} option can only be used with the `build` command:
rustpkg [options..] build {} [package-ID]", s, s);
};
if flags.linker.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --linker option can only be used with the build or install commands.");
return true;
}
if flags.link_args.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --link-args option can only be used with the build or install commands.");
return true;
}
if!cfgs.is_empty() && cmd!= "build" && cmd!= "install" {
io::println("The --cfg option can only be used with the build or install commands.");
return true;
}
if user_supplied_opt_level && cmd!= "build" && cmd!= "install" {
io::println("The -O and --opt-level options can only be used with the build \
or install commands.");
return true;
}
if flags.save_temps && cmd!= "build" && cmd!= "install" {
io::println("The --save-temps option can only be used with the build \
or install commands.");
return true;
}
if flags.target.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --target option can only be used with the build \
or install commands.");
return true;
}
if flags.target_cpu.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The --target-cpu option can only be used with the build \
or install commands.");
return true;
}
if flags.experimental_features.is_some() && cmd!= "build" && cmd!= "install" {
io::println("The -Z option can only be used with the build or install commands.");
return true;
}
match flags.compile_upto {
Link if cmd!= "build" => {
complain("--no-link");
true
}
Trans if cmd!= "build" => {
complain("--no-trans");
true
}
Assemble if cmd!= "build" => {
complain("-S");
true
}
Pretty if cmd!= "build" => {
complain("--pretty");
true
}
Analysis if cmd!= "build" => {
complain("--parse-only");
true
}
LLVMCompileBitcode if cmd!= "build" => {
complain("--emit-llvm");
true
}
LLVMAssemble if cmd!= "build" => {
complain("--emit-llvm");
true
}
_ => false
}
} | use extra::workcache;
use rustc::driver::session::{OptLevel, No}; | random_line_split |
acceptor.rs | //! Future for mediating the processing of commands received from the
//! CtlGateway in the Supervisor.
use super::handler::CtlHandler;
use crate::{ctl_gateway::server::MgrReceiver,
manager::{action::ActionSender,
ManagerState}};
use futures::{channel::oneshot,
future::FutureExt,
stream::{Stream,
StreamExt},
task::{Context,
Poll}};
use std::{pin::Pin,
sync::Arc};
pub struct CtlAcceptor {
/// Communication channel from the control gateway server. User
/// interactions are received there and then sent here into the
/// `CtlAcceptor` future for further processing.
mgr_receiver: MgrReceiver,
/// Reference to the Supervisor's main state. This is passed into
/// handlers that need to access, e.g., what services are running,
/// etc.
state: Arc<ManagerState>,
/// Signaling channel for the intention to shut down. A message
/// received on this channel will cause the `CtlAcceptor` future
/// stream to terminate.
shutdown_trigger: oneshot::Receiver<()>,
/// Communication channel back into the main Supervisor loop. This
/// is passed into any generated command handlers as a way to
/// send actions into the Supervisor.
action_sender: ActionSender,
}
impl CtlAcceptor {
pub fn new(state: Arc<ManagerState>,
mgr_receiver: MgrReceiver,
shutdown_trigger: oneshot::Receiver<()>,
action_sender: ActionSender)
-> Self {
CtlAcceptor { mgr_receiver,
state,
shutdown_trigger,
action_sender }
}
}
impl Stream for CtlAcceptor {
type Item = CtlHandler;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
match self.shutdown_trigger.poll_unpin(cx) {
Poll::Ready(Ok(())) => {
info!("Signal received; stopping CtlAcceptor");
Poll::Ready(None)
}
Poll::Ready(Err(e)) => |
Poll::Pending => {
match futures::ready!(self.mgr_receiver.poll_next_unpin(cx)) {
Some(cmd) => {
let task =
CtlHandler::new(cmd, self.state.clone(), self.action_sender.clone());
Poll::Ready(Some(task))
}
None => Poll::Ready(None),
}
}
}
}
}
| {
error!("Error polling CtlAcceptor shutdown trigger: {}", e);
Poll::Ready(None)
} | conditional_block |
acceptor.rs | //! Future for mediating the processing of commands received from the
//! CtlGateway in the Supervisor.
use super::handler::CtlHandler;
use crate::{ctl_gateway::server::MgrReceiver,
manager::{action::ActionSender,
ManagerState}};
use futures::{channel::oneshot,
future::FutureExt,
stream::{Stream,
StreamExt},
task::{Context,
Poll}};
use std::{pin::Pin,
sync::Arc};
pub struct CtlAcceptor {
/// Communication channel from the control gateway server. User
/// interactions are received there and then sent here into the
/// `CtlAcceptor` future for further processing.
mgr_receiver: MgrReceiver,
/// Reference to the Supervisor's main state. This is passed into
/// handlers that need to access, e.g., what services are running,
/// etc.
state: Arc<ManagerState>,
/// Signaling channel for the intention to shut down. A message
/// received on this channel will cause the `CtlAcceptor` future
/// stream to terminate.
shutdown_trigger: oneshot::Receiver<()>,
/// Communication channel back into the main Supervisor loop. This
/// is passed into any generated command handlers as a way to
/// send actions into the Supervisor.
action_sender: ActionSender,
}
impl CtlAcceptor {
pub fn new(state: Arc<ManagerState>,
mgr_receiver: MgrReceiver,
shutdown_trigger: oneshot::Receiver<()>,
action_sender: ActionSender)
-> Self {
CtlAcceptor { mgr_receiver,
state,
shutdown_trigger,
action_sender }
}
}
impl Stream for CtlAcceptor {
type Item = CtlHandler;
fn | (mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
match self.shutdown_trigger.poll_unpin(cx) {
Poll::Ready(Ok(())) => {
info!("Signal received; stopping CtlAcceptor");
Poll::Ready(None)
}
Poll::Ready(Err(e)) => {
error!("Error polling CtlAcceptor shutdown trigger: {}", e);
Poll::Ready(None)
}
Poll::Pending => {
match futures::ready!(self.mgr_receiver.poll_next_unpin(cx)) {
Some(cmd) => {
let task =
CtlHandler::new(cmd, self.state.clone(), self.action_sender.clone());
Poll::Ready(Some(task))
}
None => Poll::Ready(None),
}
}
}
}
}
| poll_next | identifier_name |
acceptor.rs | //! Future for mediating the processing of commands received from the
//! CtlGateway in the Supervisor.
use super::handler::CtlHandler;
use crate::{ctl_gateway::server::MgrReceiver,
manager::{action::ActionSender,
ManagerState}};
use futures::{channel::oneshot,
future::FutureExt,
stream::{Stream,
StreamExt},
task::{Context,
Poll}};
use std::{pin::Pin,
sync::Arc};
pub struct CtlAcceptor {
/// Communication channel from the control gateway server. User
/// interactions are received there and then sent here into the
/// `CtlAcceptor` future for further processing.
mgr_receiver: MgrReceiver,
/// Reference to the Supervisor's main state. This is passed into
/// handlers that need to access, e.g., what services are running,
/// etc.
state: Arc<ManagerState>,
/// Signaling channel for the intention to shut down. A message
/// received on this channel will cause the `CtlAcceptor` future
/// stream to terminate.
shutdown_trigger: oneshot::Receiver<()>,
/// Communication channel back into the main Supervisor loop. This
/// is passed into any generated command handlers as a way to
/// send actions into the Supervisor.
action_sender: ActionSender,
}
impl CtlAcceptor {
pub fn new(state: Arc<ManagerState>, | shutdown_trigger: oneshot::Receiver<()>,
action_sender: ActionSender)
-> Self {
CtlAcceptor { mgr_receiver,
state,
shutdown_trigger,
action_sender }
}
}
impl Stream for CtlAcceptor {
type Item = CtlHandler;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
match self.shutdown_trigger.poll_unpin(cx) {
Poll::Ready(Ok(())) => {
info!("Signal received; stopping CtlAcceptor");
Poll::Ready(None)
}
Poll::Ready(Err(e)) => {
error!("Error polling CtlAcceptor shutdown trigger: {}", e);
Poll::Ready(None)
}
Poll::Pending => {
match futures::ready!(self.mgr_receiver.poll_next_unpin(cx)) {
Some(cmd) => {
let task =
CtlHandler::new(cmd, self.state.clone(), self.action_sender.clone());
Poll::Ready(Some(task))
}
None => Poll::Ready(None),
}
}
}
}
} | mgr_receiver: MgrReceiver, | random_line_split |
main.rs | extern crate rustc_serialize;
extern crate docopt;
extern crate glob;
use docopt::Docopt;
use std::io::Write;
use std::path::PathBuf;
use glob::glob;
#[cfg_attr(rustfmt, rustfmt_skip)] | Kibar imager. Helper utils to download, format, install and manage raspbery
pi images for the kibar project.
Usage:
img install <device>
img mount <device> <location>
img unmount (<device> | <location>)
img chroot <device>
img (-h | --help | --version)
Options:
-h --help Show this screen.
--version Show version.
";
#[derive(Debug, RustcDecodable)]
struct Args {
arg_device: String,
arg_location: String,
cmd_install: bool,
cmd_mount: bool,
cmd_unmount: bool,
cmd_chroot: bool,
}
#[derive(Debug)]
struct Device {
device_file: PathBuf,
partitions: Vec<PathBuf>,
}
impl Device {
// TODO pass errors up rather then just panicing
fn new(device_file: String) -> Device {
let pattern = device_file.clone() + "?[0-9]";
Device {
device_file: PathBuf::from(device_file),
partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
}
}
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
println!("{:?}", args);
if args.cmd_install {
unimplemented!()
} else if args.cmd_mount {
let d = Device::new(args.arg_device);
println!("{:?}", d);
} else if args.cmd_unmount {
unimplemented!();
writeln!(&mut std::io::stderr(), "Error!").unwrap();
::std::process::exit(1)
} else if args.cmd_chroot {
unimplemented!()
} else {
unimplemented!()
}
} | const USAGE: &'static str = " | random_line_split |
main.rs | extern crate rustc_serialize;
extern crate docopt;
extern crate glob;
use docopt::Docopt;
use std::io::Write;
use std::path::PathBuf;
use glob::glob;
#[cfg_attr(rustfmt, rustfmt_skip)]
const USAGE: &'static str = "
Kibar imager. Helper utils to download, format, install and manage raspbery
pi images for the kibar project.
Usage:
img install <device>
img mount <device> <location>
img unmount (<device> | <location>)
img chroot <device>
img (-h | --help | --version)
Options:
-h --help Show this screen.
--version Show version.
";
#[derive(Debug, RustcDecodable)]
struct | {
arg_device: String,
arg_location: String,
cmd_install: bool,
cmd_mount: bool,
cmd_unmount: bool,
cmd_chroot: bool,
}
#[derive(Debug)]
struct Device {
device_file: PathBuf,
partitions: Vec<PathBuf>,
}
impl Device {
// TODO pass errors up rather then just panicing
fn new(device_file: String) -> Device {
let pattern = device_file.clone() + "?[0-9]";
Device {
device_file: PathBuf::from(device_file),
partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
}
}
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
println!("{:?}", args);
if args.cmd_install {
unimplemented!()
} else if args.cmd_mount {
let d = Device::new(args.arg_device);
println!("{:?}", d);
} else if args.cmd_unmount {
unimplemented!();
writeln!(&mut std::io::stderr(), "Error!").unwrap();
::std::process::exit(1)
} else if args.cmd_chroot {
unimplemented!()
} else {
unimplemented!()
}
}
| Args | identifier_name |
main.rs | extern crate rustc_serialize;
extern crate docopt;
extern crate glob;
use docopt::Docopt;
use std::io::Write;
use std::path::PathBuf;
use glob::glob;
#[cfg_attr(rustfmt, rustfmt_skip)]
const USAGE: &'static str = "
Kibar imager. Helper utils to download, format, install and manage raspbery
pi images for the kibar project.
Usage:
img install <device>
img mount <device> <location>
img unmount (<device> | <location>)
img chroot <device>
img (-h | --help | --version)
Options:
-h --help Show this screen.
--version Show version.
";
#[derive(Debug, RustcDecodable)]
struct Args {
arg_device: String,
arg_location: String,
cmd_install: bool,
cmd_mount: bool,
cmd_unmount: bool,
cmd_chroot: bool,
}
#[derive(Debug)]
struct Device {
device_file: PathBuf,
partitions: Vec<PathBuf>,
}
impl Device {
// TODO pass errors up rather then just panicing
fn new(device_file: String) -> Device {
let pattern = device_file.clone() + "?[0-9]";
Device {
device_file: PathBuf::from(device_file),
partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
}
}
}
fn main() | }
| {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
println!("{:?}", args);
if args.cmd_install {
unimplemented!()
} else if args.cmd_mount {
let d = Device::new(args.arg_device);
println!("{:?}", d);
} else if args.cmd_unmount {
unimplemented!();
writeln!(&mut std::io::stderr(), "Error!").unwrap();
::std::process::exit(1)
} else if args.cmd_chroot {
unimplemented!()
} else {
unimplemented!()
} | identifier_body |
main.rs | extern crate rustc_serialize;
extern crate docopt;
extern crate glob;
use docopt::Docopt;
use std::io::Write;
use std::path::PathBuf;
use glob::glob;
#[cfg_attr(rustfmt, rustfmt_skip)]
const USAGE: &'static str = "
Kibar imager. Helper utils to download, format, install and manage raspbery
pi images for the kibar project.
Usage:
img install <device>
img mount <device> <location>
img unmount (<device> | <location>)
img chroot <device>
img (-h | --help | --version)
Options:
-h --help Show this screen.
--version Show version.
";
#[derive(Debug, RustcDecodable)]
struct Args {
arg_device: String,
arg_location: String,
cmd_install: bool,
cmd_mount: bool,
cmd_unmount: bool,
cmd_chroot: bool,
}
#[derive(Debug)]
struct Device {
device_file: PathBuf,
partitions: Vec<PathBuf>,
}
impl Device {
// TODO pass errors up rather then just panicing
fn new(device_file: String) -> Device {
let pattern = device_file.clone() + "?[0-9]";
Device {
device_file: PathBuf::from(device_file),
partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
}
}
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
println!("{:?}", args);
if args.cmd_install | else if args.cmd_mount {
let d = Device::new(args.arg_device);
println!("{:?}", d);
} else if args.cmd_unmount {
unimplemented!();
writeln!(&mut std::io::stderr(), "Error!").unwrap();
::std::process::exit(1)
} else if args.cmd_chroot {
unimplemented!()
} else {
unimplemented!()
}
}
| {
unimplemented!()
} | conditional_block |
microtask.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and
//! microtask queues. It is up to implementations of event loops to store a queue and
//! perform checkpoints at appropriate times, as well as enqueue microtasks as required.
use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback;
use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction;
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use crate::dom::htmlimageelement::ImageElementMicrotask;
use crate::dom::htmlmediaelement::MediaElementMicrotask;
use crate::dom::mutationobserver::MutationObserver;
use crate::script_runtime::{notify_about_rejected_promises, JSContext};
use crate::script_thread::ScriptThread;
use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::mem;
use std::rc::Rc;
/// A collection of microtasks in FIFO order.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct MicrotaskQueue {
/// The list of enqueued microtasks that will be invoked at the next microtask checkpoint.
microtask_queue: DomRefCell<Vec<Microtask>>,
/// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint>
performing_a_microtask_checkpoint: Cell<bool>,
}
#[derive(JSTraceable, MallocSizeOf)]
pub enum Microtask {
Promise(EnqueuedPromiseCallback),
User(UserMicrotask),
MediaElement(MediaElementMicrotask),
ImageElement(ImageElementMicrotask),
CustomElementReaction,
NotifyMutationObservers,
}
pub trait MicrotaskRunnable {
fn handler(&self) {}
}
/// A promise callback scheduled to run during the next microtask checkpoint (#4283).
#[derive(JSTraceable, MallocSizeOf)]
pub struct EnqueuedPromiseCallback {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<PromiseJobCallback>,
pub pipeline: PipelineId,
}
/// A microtask that comes from a queueMicrotask() Javascript call,
/// identical to EnqueuedPromiseCallback once it's on the queue
#[derive(JSTraceable, MallocSizeOf)]
pub struct UserMicrotask {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<VoidFunction>,
pub pipeline: PipelineId,
}
impl MicrotaskQueue {
/// Add a new microtask to this queue. It will be invoked as part of the next
/// microtask checkpoint.
#[allow(unsafe_code)]
pub fn enqueue(&self, job: Microtask, cx: JSContext) {
self.microtask_queue.borrow_mut().push(job);
unsafe { JobQueueMayNotBeEmpty(*cx) };
}
/// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint>
/// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty.
#[allow(unsafe_code)]
pub fn checkpoint<F>(
&self,
cx: JSContext,
target_provider: F,
globalscopes: Vec<DomRoot<GlobalScope>>,
) where
F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>,
| match *job {
Microtask::Promise(ref job) => {
if let Some(target) = target_provider(job.pipeline) {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
}
},
Microtask::User(ref job) => {
if let Some(target) = target_provider(job.pipeline) {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
}
},
Microtask::MediaElement(ref task) => {
task.handler();
},
Microtask::ImageElement(ref task) => {
task.handler();
},
Microtask::CustomElementReaction => {
ScriptThread::invoke_backup_element_queue();
},
Microtask::NotifyMutationObservers => {
MutationObserver::notify_mutation_observers();
},
}
}
}
// Step 3
for global in globalscopes.into_iter() {
notify_about_rejected_promises(&global);
}
// TODO: Step 4 - Cleanup Indexed Database transactions.
// Step 5
self.performing_a_microtask_checkpoint.set(false);
}
pub fn empty(&self) -> bool {
self.microtask_queue.borrow().is_empty()
}
}
| {
if self.performing_a_microtask_checkpoint.get() {
return;
}
// Step 1
self.performing_a_microtask_checkpoint.set(true);
debug!("Now performing a microtask checkpoint");
// Steps 2
while !self.microtask_queue.borrow().is_empty() {
rooted_vec!(let mut pending_queue);
mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut());
for (idx, job) in pending_queue.iter().enumerate() {
if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() {
unsafe { JobQueueIsEmpty(*cx) };
}
| identifier_body |
microtask.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and
//! microtask queues. It is up to implementations of event loops to store a queue and
//! perform checkpoints at appropriate times, as well as enqueue microtasks as required.
use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback;
use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction;
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use crate::dom::htmlimageelement::ImageElementMicrotask;
use crate::dom::htmlmediaelement::MediaElementMicrotask;
use crate::dom::mutationobserver::MutationObserver;
use crate::script_runtime::{notify_about_rejected_promises, JSContext};
use crate::script_thread::ScriptThread;
use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::mem;
use std::rc::Rc;
/// A collection of microtasks in FIFO order.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct MicrotaskQueue {
/// The list of enqueued microtasks that will be invoked at the next microtask checkpoint.
microtask_queue: DomRefCell<Vec<Microtask>>,
/// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint>
performing_a_microtask_checkpoint: Cell<bool>,
}
#[derive(JSTraceable, MallocSizeOf)]
pub enum Microtask {
Promise(EnqueuedPromiseCallback),
User(UserMicrotask),
MediaElement(MediaElementMicrotask),
ImageElement(ImageElementMicrotask),
CustomElementReaction,
NotifyMutationObservers,
}
pub trait MicrotaskRunnable {
fn handler(&self) {}
}
/// A promise callback scheduled to run during the next microtask checkpoint (#4283).
#[derive(JSTraceable, MallocSizeOf)]
pub struct EnqueuedPromiseCallback {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<PromiseJobCallback>,
pub pipeline: PipelineId,
}
/// A microtask that comes from a queueMicrotask() Javascript call,
/// identical to EnqueuedPromiseCallback once it's on the queue
#[derive(JSTraceable, MallocSizeOf)]
pub struct UserMicrotask {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<VoidFunction>,
pub pipeline: PipelineId,
}
impl MicrotaskQueue {
/// Add a new microtask to this queue. It will be invoked as part of the next
/// microtask checkpoint.
#[allow(unsafe_code)]
pub fn | (&self, job: Microtask, cx: JSContext) {
self.microtask_queue.borrow_mut().push(job);
unsafe { JobQueueMayNotBeEmpty(*cx) };
}
/// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint>
/// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty.
#[allow(unsafe_code)]
pub fn checkpoint<F>(
&self,
cx: JSContext,
target_provider: F,
globalscopes: Vec<DomRoot<GlobalScope>>,
) where
F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>,
{
if self.performing_a_microtask_checkpoint.get() {
return;
}
// Step 1
self.performing_a_microtask_checkpoint.set(true);
debug!("Now performing a microtask checkpoint");
// Steps 2
while!self.microtask_queue.borrow().is_empty() {
rooted_vec!(let mut pending_queue);
mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut());
for (idx, job) in pending_queue.iter().enumerate() {
if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() {
unsafe { JobQueueIsEmpty(*cx) };
}
match *job {
Microtask::Promise(ref job) => {
if let Some(target) = target_provider(job.pipeline) {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
}
},
Microtask::User(ref job) => {
if let Some(target) = target_provider(job.pipeline) {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
}
},
Microtask::MediaElement(ref task) => {
task.handler();
},
Microtask::ImageElement(ref task) => {
task.handler();
},
Microtask::CustomElementReaction => {
ScriptThread::invoke_backup_element_queue();
},
Microtask::NotifyMutationObservers => {
MutationObserver::notify_mutation_observers();
},
}
}
}
// Step 3
for global in globalscopes.into_iter() {
notify_about_rejected_promises(&global);
}
// TODO: Step 4 - Cleanup Indexed Database transactions.
// Step 5
self.performing_a_microtask_checkpoint.set(false);
}
pub fn empty(&self) -> bool {
self.microtask_queue.borrow().is_empty()
}
}
| enqueue | identifier_name |
microtask.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and
//! microtask queues. It is up to implementations of event loops to store a queue and
//! perform checkpoints at appropriate times, as well as enqueue microtasks as required.
use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback;
use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction;
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use crate::dom::htmlimageelement::ImageElementMicrotask;
use crate::dom::htmlmediaelement::MediaElementMicrotask;
use crate::dom::mutationobserver::MutationObserver;
use crate::script_runtime::{notify_about_rejected_promises, JSContext};
use crate::script_thread::ScriptThread;
use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::mem;
use std::rc::Rc;
/// A collection of microtasks in FIFO order.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct MicrotaskQueue {
/// The list of enqueued microtasks that will be invoked at the next microtask checkpoint.
microtask_queue: DomRefCell<Vec<Microtask>>,
/// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint>
performing_a_microtask_checkpoint: Cell<bool>,
}
#[derive(JSTraceable, MallocSizeOf)]
pub enum Microtask {
Promise(EnqueuedPromiseCallback),
User(UserMicrotask),
MediaElement(MediaElementMicrotask),
ImageElement(ImageElementMicrotask),
CustomElementReaction,
NotifyMutationObservers,
}
pub trait MicrotaskRunnable {
fn handler(&self) {}
}
/// A promise callback scheduled to run during the next microtask checkpoint (#4283).
#[derive(JSTraceable, MallocSizeOf)]
pub struct EnqueuedPromiseCallback {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<PromiseJobCallback>,
pub pipeline: PipelineId,
}
/// A microtask that comes from a queueMicrotask() Javascript call,
/// identical to EnqueuedPromiseCallback once it's on the queue
#[derive(JSTraceable, MallocSizeOf)]
pub struct UserMicrotask {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<VoidFunction>,
pub pipeline: PipelineId,
}
impl MicrotaskQueue {
/// Add a new microtask to this queue. It will be invoked as part of the next
/// microtask checkpoint.
#[allow(unsafe_code)]
pub fn enqueue(&self, job: Microtask, cx: JSContext) {
self.microtask_queue.borrow_mut().push(job);
unsafe { JobQueueMayNotBeEmpty(*cx) };
}
/// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint>
/// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty.
#[allow(unsafe_code)]
pub fn checkpoint<F>(
&self,
cx: JSContext,
target_provider: F,
globalscopes: Vec<DomRoot<GlobalScope>>,
) where
F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>,
{
if self.performing_a_microtask_checkpoint.get() {
return;
}
// Step 1
self.performing_a_microtask_checkpoint.set(true);
debug!("Now performing a microtask checkpoint");
// Steps 2
while!self.microtask_queue.borrow().is_empty() {
rooted_vec!(let mut pending_queue);
mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut());
for (idx, job) in pending_queue.iter().enumerate() {
if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() {
unsafe { JobQueueIsEmpty(*cx) };
}
match *job {
Microtask::Promise(ref job) => {
if let Some(target) = target_provider(job.pipeline) {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
}
},
Microtask::User(ref job) => {
if let Some(target) = target_provider(job.pipeline) {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
}
},
Microtask::MediaElement(ref task) => { | },
Microtask::CustomElementReaction => {
ScriptThread::invoke_backup_element_queue();
},
Microtask::NotifyMutationObservers => {
MutationObserver::notify_mutation_observers();
},
}
}
}
// Step 3
for global in globalscopes.into_iter() {
notify_about_rejected_promises(&global);
}
// TODO: Step 4 - Cleanup Indexed Database transactions.
// Step 5
self.performing_a_microtask_checkpoint.set(false);
}
pub fn empty(&self) -> bool {
self.microtask_queue.borrow().is_empty()
}
} | task.handler();
},
Microtask::ImageElement(ref task) => {
task.handler(); | random_line_split |
microtask.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and
//! microtask queues. It is up to implementations of event loops to store a queue and
//! perform checkpoints at appropriate times, as well as enqueue microtasks as required.
use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback;
use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction;
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use crate::dom::htmlimageelement::ImageElementMicrotask;
use crate::dom::htmlmediaelement::MediaElementMicrotask;
use crate::dom::mutationobserver::MutationObserver;
use crate::script_runtime::{notify_about_rejected_promises, JSContext};
use crate::script_thread::ScriptThread;
use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::mem;
use std::rc::Rc;
/// A collection of microtasks in FIFO order.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct MicrotaskQueue {
/// The list of enqueued microtasks that will be invoked at the next microtask checkpoint.
microtask_queue: DomRefCell<Vec<Microtask>>,
/// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint>
performing_a_microtask_checkpoint: Cell<bool>,
}
#[derive(JSTraceable, MallocSizeOf)]
pub enum Microtask {
Promise(EnqueuedPromiseCallback),
User(UserMicrotask),
MediaElement(MediaElementMicrotask),
ImageElement(ImageElementMicrotask),
CustomElementReaction,
NotifyMutationObservers,
}
pub trait MicrotaskRunnable {
fn handler(&self) {}
}
/// A promise callback scheduled to run during the next microtask checkpoint (#4283).
#[derive(JSTraceable, MallocSizeOf)]
pub struct EnqueuedPromiseCallback {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<PromiseJobCallback>,
pub pipeline: PipelineId,
}
/// A microtask that comes from a queueMicrotask() Javascript call,
/// identical to EnqueuedPromiseCallback once it's on the queue
#[derive(JSTraceable, MallocSizeOf)]
pub struct UserMicrotask {
#[ignore_malloc_size_of = "Rc has unclear ownership"]
pub callback: Rc<VoidFunction>,
pub pipeline: PipelineId,
}
impl MicrotaskQueue {
/// Add a new microtask to this queue. It will be invoked as part of the next
/// microtask checkpoint.
#[allow(unsafe_code)]
pub fn enqueue(&self, job: Microtask, cx: JSContext) {
self.microtask_queue.borrow_mut().push(job);
unsafe { JobQueueMayNotBeEmpty(*cx) };
}
/// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint>
/// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty.
#[allow(unsafe_code)]
pub fn checkpoint<F>(
&self,
cx: JSContext,
target_provider: F,
globalscopes: Vec<DomRoot<GlobalScope>>,
) where
F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>,
{
if self.performing_a_microtask_checkpoint.get() {
return;
}
// Step 1
self.performing_a_microtask_checkpoint.set(true);
debug!("Now performing a microtask checkpoint");
// Steps 2
while!self.microtask_queue.borrow().is_empty() {
rooted_vec!(let mut pending_queue);
mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut());
for (idx, job) in pending_queue.iter().enumerate() {
if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() {
unsafe { JobQueueIsEmpty(*cx) };
}
match *job {
Microtask::Promise(ref job) => {
if let Some(target) = target_provider(job.pipeline) {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
}
},
Microtask::User(ref job) => {
if let Some(target) = target_provider(job.pipeline) |
},
Microtask::MediaElement(ref task) => {
task.handler();
},
Microtask::ImageElement(ref task) => {
task.handler();
},
Microtask::CustomElementReaction => {
ScriptThread::invoke_backup_element_queue();
},
Microtask::NotifyMutationObservers => {
MutationObserver::notify_mutation_observers();
},
}
}
}
// Step 3
for global in globalscopes.into_iter() {
notify_about_rejected_promises(&global);
}
// TODO: Step 4 - Cleanup Indexed Database transactions.
// Step 5
self.performing_a_microtask_checkpoint.set(false);
}
pub fn empty(&self) -> bool {
self.microtask_queue.borrow().is_empty()
}
}
| {
let _ = job.callback.Call_(&*target, ExceptionHandling::Report);
} | conditional_block |
fn_to_numeric_cast_any.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::source::snippet_with_applicability;
use rustc_errors::Applicability;
use rustc_hir::Expr;
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::FN_TO_NUMERIC_CAST_ANY;
pub(super) fn | (cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
// We allow casts from any function type to any function type.
match cast_to.kind() {
ty::FnDef(..) | ty::FnPtr(..) => return,
_ => { /* continue to checks */ },
}
match cast_from.kind() {
ty::FnDef(..) | ty::FnPtr(_) => {
let mut applicability = Applicability::MaybeIncorrect;
let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability);
span_lint_and_sugg(
cx,
FN_TO_NUMERIC_CAST_ANY,
expr.span,
&format!("casting function pointer `{}` to `{}`", from_snippet, cast_to),
"did you mean to invoke the function?",
format!("{}() as {}", from_snippet, cast_to),
applicability,
);
},
_ => {},
}
}
| check | identifier_name |
fn_to_numeric_cast_any.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::source::snippet_with_applicability; | use rustc_errors::Applicability;
use rustc_hir::Expr;
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::FN_TO_NUMERIC_CAST_ANY;
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
// We allow casts from any function type to any function type.
match cast_to.kind() {
ty::FnDef(..) | ty::FnPtr(..) => return,
_ => { /* continue to checks */ },
}
match cast_from.kind() {
ty::FnDef(..) | ty::FnPtr(_) => {
let mut applicability = Applicability::MaybeIncorrect;
let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability);
span_lint_and_sugg(
cx,
FN_TO_NUMERIC_CAST_ANY,
expr.span,
&format!("casting function pointer `{}` to `{}`", from_snippet, cast_to),
"did you mean to invoke the function?",
format!("{}() as {}", from_snippet, cast_to),
applicability,
);
},
_ => {},
}
} | random_line_split |
|
fn_to_numeric_cast_any.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::source::snippet_with_applicability;
use rustc_errors::Applicability;
use rustc_hir::Expr;
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::FN_TO_NUMERIC_CAST_ANY;
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
// We allow casts from any function type to any function type.
match cast_to.kind() {
ty::FnDef(..) | ty::FnPtr(..) => return,
_ => { /* continue to checks */ },
}
match cast_from.kind() {
ty::FnDef(..) | ty::FnPtr(_) => {
let mut applicability = Applicability::MaybeIncorrect;
let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability);
span_lint_and_sugg(
cx,
FN_TO_NUMERIC_CAST_ANY,
expr.span,
&format!("casting function pointer `{}` to `{}`", from_snippet, cast_to),
"did you mean to invoke the function?",
format!("{}() as {}", from_snippet, cast_to),
applicability,
);
},
_ => | ,
}
}
| {} | conditional_block |
fn_to_numeric_cast_any.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::source::snippet_with_applicability;
use rustc_errors::Applicability;
use rustc_hir::Expr;
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::FN_TO_NUMERIC_CAST_ANY;
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) | );
},
_ => {},
}
}
| {
// We allow casts from any function type to any function type.
match cast_to.kind() {
ty::FnDef(..) | ty::FnPtr(..) => return,
_ => { /* continue to checks */ },
}
match cast_from.kind() {
ty::FnDef(..) | ty::FnPtr(_) => {
let mut applicability = Applicability::MaybeIncorrect;
let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability);
span_lint_and_sugg(
cx,
FN_TO_NUMERIC_CAST_ANY,
expr.span,
&format!("casting function pointer `{}` to `{}`", from_snippet, cast_to),
"did you mean to invoke the function?",
format!("{}() as {}", from_snippet, cast_to),
applicability, | identifier_body |
init.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::RegisterBindings;
use crate::dom::bindings::proxyhandler;
use crate::script_runtime::JSEngineSetup;
use crate::serviceworker_manager::ServiceWorkerManager;
use script_traits::SWManagerSenders;
#[cfg(target_os = "linux")]
#[allow(unsafe_code)]
fn perform_platform_specific_initialization() {
// 4096 is default max on many linux systems
const MAX_FILE_LIMIT: libc::rlim_t = 4096;
| let mut rlim = libc::rlimit {
rlim_cur: 0,
rlim_max: 0,
};
match libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) {
0 => {
if rlim.rlim_cur >= MAX_FILE_LIMIT {
// we have more than enough
return;
}
rlim.rlim_cur = match rlim.rlim_max {
libc::RLIM_INFINITY => MAX_FILE_LIMIT,
_ => {
if rlim.rlim_max < MAX_FILE_LIMIT {
rlim.rlim_max
} else {
MAX_FILE_LIMIT
}
},
};
match libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) {
0 => (),
_ => warn!("Failed to set file count limit"),
};
},
_ => warn!("Failed to get file count limit"),
};
}
}
#[cfg(not(target_os = "linux"))]
fn perform_platform_specific_initialization() {}
pub fn init_service_workers(sw_senders: SWManagerSenders) {
// Spawn the service worker manager passing the constellation sender
ServiceWorkerManager::spawn_manager(sw_senders);
}
#[allow(unsafe_code)]
pub fn init() -> JSEngineSetup {
unsafe {
proxyhandler::init();
// Create the global vtables used by the (generated) DOM
// bindings to implement JS proxies.
RegisterBindings::RegisterProxyHandlers();
}
perform_platform_specific_initialization();
JSEngineSetup::new()
} | // Bump up our number of file descriptors to save us from impending doom caused by an onslaught
// of iframes.
unsafe { | random_line_split |
init.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::RegisterBindings;
use crate::dom::bindings::proxyhandler;
use crate::script_runtime::JSEngineSetup;
use crate::serviceworker_manager::ServiceWorkerManager;
use script_traits::SWManagerSenders;
#[cfg(target_os = "linux")]
#[allow(unsafe_code)]
fn perform_platform_specific_initialization() {
// 4096 is default max on many linux systems
const MAX_FILE_LIMIT: libc::rlim_t = 4096;
// Bump up our number of file descriptors to save us from impending doom caused by an onslaught
// of iframes.
unsafe {
let mut rlim = libc::rlimit {
rlim_cur: 0,
rlim_max: 0,
};
match libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) {
0 => {
if rlim.rlim_cur >= MAX_FILE_LIMIT {
// we have more than enough
return;
}
rlim.rlim_cur = match rlim.rlim_max {
libc::RLIM_INFINITY => MAX_FILE_LIMIT,
_ => {
if rlim.rlim_max < MAX_FILE_LIMIT {
rlim.rlim_max
} else {
MAX_FILE_LIMIT
}
},
};
match libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) {
0 => (),
_ => warn!("Failed to set file count limit"),
};
},
_ => warn!("Failed to get file count limit"),
};
}
}
#[cfg(not(target_os = "linux"))]
fn perform_platform_specific_initialization() |
pub fn init_service_workers(sw_senders: SWManagerSenders) {
// Spawn the service worker manager passing the constellation sender
ServiceWorkerManager::spawn_manager(sw_senders);
}
#[allow(unsafe_code)]
pub fn init() -> JSEngineSetup {
unsafe {
proxyhandler::init();
// Create the global vtables used by the (generated) DOM
// bindings to implement JS proxies.
RegisterBindings::RegisterProxyHandlers();
}
perform_platform_specific_initialization();
JSEngineSetup::new()
}
| {} | identifier_body |
init.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::RegisterBindings;
use crate::dom::bindings::proxyhandler;
use crate::script_runtime::JSEngineSetup;
use crate::serviceworker_manager::ServiceWorkerManager;
use script_traits::SWManagerSenders;
#[cfg(target_os = "linux")]
#[allow(unsafe_code)]
fn | () {
// 4096 is default max on many linux systems
const MAX_FILE_LIMIT: libc::rlim_t = 4096;
// Bump up our number of file descriptors to save us from impending doom caused by an onslaught
// of iframes.
unsafe {
let mut rlim = libc::rlimit {
rlim_cur: 0,
rlim_max: 0,
};
match libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) {
0 => {
if rlim.rlim_cur >= MAX_FILE_LIMIT {
// we have more than enough
return;
}
rlim.rlim_cur = match rlim.rlim_max {
libc::RLIM_INFINITY => MAX_FILE_LIMIT,
_ => {
if rlim.rlim_max < MAX_FILE_LIMIT {
rlim.rlim_max
} else {
MAX_FILE_LIMIT
}
},
};
match libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) {
0 => (),
_ => warn!("Failed to set file count limit"),
};
},
_ => warn!("Failed to get file count limit"),
};
}
}
#[cfg(not(target_os = "linux"))]
fn perform_platform_specific_initialization() {}
pub fn init_service_workers(sw_senders: SWManagerSenders) {
// Spawn the service worker manager passing the constellation sender
ServiceWorkerManager::spawn_manager(sw_senders);
}
#[allow(unsafe_code)]
pub fn init() -> JSEngineSetup {
unsafe {
proxyhandler::init();
// Create the global vtables used by the (generated) DOM
// bindings to implement JS proxies.
RegisterBindings::RegisterProxyHandlers();
}
perform_platform_specific_initialization();
JSEngineSetup::new()
}
| perform_platform_specific_initialization | identifier_name |
clarity_cli.rs | /*
copyright: (c) 2013-2019 by Blockstack PBC, a public benefit corporation.
This file is part of Blockstack.
Blockstack is free software. You may redistribute or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY, including without the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
*/
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
extern crate blockstack_lib;
use std::env;
use blockstack_lib::{ util::log, clarity };
fn | () {
log::set_loglevel(log::LOG_DEBUG).unwrap();
let argv : Vec<String> = env::args().collect();
clarity::invoke_command(&argv[0], &argv[1..]);
}
| main | identifier_name |
clarity_cli.rs | /*
copyright: (c) 2013-2019 by Blockstack PBC, a public benefit corporation.
This file is part of Blockstack.
Blockstack is free software. You may redistribute or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY, including without the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
*/
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
extern crate blockstack_lib;
use std::env;
use blockstack_lib::{ util::log, clarity };
fn main() | {
log::set_loglevel(log::LOG_DEBUG).unwrap();
let argv : Vec<String> = env::args().collect();
clarity::invoke_command(&argv[0], &argv[1..]);
} | identifier_body |
|
clarity_cli.rs | /*
copyright: (c) 2013-2019 by Blockstack PBC, a public benefit corporation.
This file is part of Blockstack. | it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY, including without the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
*/
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
extern crate blockstack_lib;
use std::env;
use blockstack_lib::{ util::log, clarity };
fn main() {
log::set_loglevel(log::LOG_DEBUG).unwrap();
let argv : Vec<String> = env::args().collect();
clarity::invoke_command(&argv[0], &argv[1..]);
} |
Blockstack is free software. You may redistribute or modify | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.