| file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
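Each row below pairs a source file with a fill-in-the-middle (FIM) split of its contents; the original text of a row is recovered as `prefix + middle + suffix`. A minimal sketch of the row layout follows (the struct and field names are illustrative assumptions, not an official schema definition):

```rust
// Hypothetical row type mirroring the columns above.
struct FimRow {
    file_name: String, // e.g. "custom_build.rs", "wrapper.rs", "lib.rs"
    prefix: String,    // text before the masked span
    suffix: String,    // text after the masked span
    middle: String,    // the masked span itself
    fim_type: String,  // e.g. "random_line_split", "identifier_name", "identifier_body"
}

// Reassembling the original source for a row.
fn reconstruct(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}
```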
custom_build.rs
use std::collections::{HashMap, BTreeSet, HashSet};
use std::fs;
use std::path::{PathBuf, Path};
use std::str;
use std::sync::{Mutex, Arc};
use package_id::PackageId;
use util::{CraftResult, Human, Freshness, internal, ChainError, profile, paths};
use super::job::Work;
use super::{fingerprint, Kind, Context, Unit};
/// Contains the parsed output of a custom build script.
#[derive(Clone, Debug, Hash)]
pub struct BuildOutput {
/// Paths to pass to cc with the `-L` flag
pub library_paths: Vec<PathBuf>,
/// Names and link kinds of libraries, suitable for the `-l` flag
pub library_links: Vec<String>,
/// Metadata to pass to the immediate dependencies
pub metadata: Vec<(String, String)>,
/// Glob paths to trigger a rerun of this build script.
pub rerun_if_changed: Vec<String>,
/// Warnings generated by this build,
pub warnings: Vec<String>,
}
pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
pub struct BuildState {
pub outputs: Mutex<BuildMap>,
overrides: HashMap<(String, Kind), BuildOutput>,
}
#[derive(Default)]
pub struct BuildScripts {
// Craft will use this `to_link` vector to add -L flags to compiles as we
// propagate them upwards towards the final build. Note, however, that we
// need to preserve the ordering of `to_link` to be topologically sorted.
// This will ensure that build scripts which print their paths properly will
// correctly pick up the files they generated (if there are duplicates
// elsewhere).
//
// To preserve this ordering, the (id, kind) is stored in two places, once
// in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain
// this as we're building interactively below to ensure that the memory
// usage here doesn't blow up too much.
//
// For more information, see #2354
pub to_link: Vec<(PackageId, Kind)>,
seen_to_link: HashSet<(PackageId, Kind)>,
pub plugins: BTreeSet<PackageId>,
}
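// Illustration (not part of custom_build.rs): the `to_link`/`seen_to_link` pattern
// described above, in isolation. The HashSet guards the Vec so each key is pushed
// at most once, while the Vec keeps first-seen (topological) order.
fn push_unique<T: Clone + std::hash::Hash + Eq>(order: &mut Vec<T>,
                                                seen: &mut HashSet<T>,
                                                item: T) {
    if seen.insert(item.clone()) {
        order.push(item);
    }
}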
/// Prepares a `Work` that executes the target as a custom build script.
///
/// The `req` given is the requirement which this run of the build script will
/// prepare work for. If the requirement is specified as both the target and the
/// host platforms it is assumed that the two are equal and the build script is
/// only run once (not twice).
pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CraftResult<(Work, Work, Freshness)> {
let _p = profile::start(format!("build script prepare: {}/{}", unit.pkg, unit.target.name()));
let overridden = cx.build_state.has_override(unit);
let (work_dirty, work_fresh) = if overridden {
(Work::new(|_| Ok(())), Work::new(|_| Ok(())))
} else {
build_work(cx, unit)?
};
// Now that we've prep'd our work, build the work needed to manage the
// fingerprint and then start returning that upwards.
let (freshness, dirty, fresh) = fingerprint::prepare_build_cmd(cx, unit)?;
Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
}
fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CraftResult<(Work, Work)> {
let host_unit = Unit { kind: Kind::Host, ..*unit };
let (script_output, build_output) = {
(cx.layout(&host_unit).build(unit.pkg), cx.layout(unit).build_out(unit.pkg))
};
// Building the command to execute
let to_exec = script_output.join(unit.target.name());
// Start preparing the process to execute, starting out with some
// environment variables. Note that the profile-related environment
// variables are not set from the build script's own profile but rather the
// package's library profile.
let profile = cx.lib_profile(unit.pkg.package_id());
let to_exec = to_exec.into_os_string();
let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?;
cmd.env("OUT_DIR", &build_output)
.env("CRAFT_MANIFEST_DIR", unit.pkg.root())
.env("NUM_JOBS", &cx.jobs().to_string())
.env("TARGET",
&match unit.kind {
Kind::Host => cx.host_triple(),
Kind::Target => cx.target_triple(),
})
.env("DEBUG", &profile.debuginfo.to_string())
.env("OPT_LEVEL", &profile.opt_level)
.env("PROFILE",
if cx.build_config.release {
"release"
} else {
"debug"
})
.env("HOST", cx.host_triple())
.env("CC", &cx.config.cc()?.path)
.env("DOC", &*cx.config.doc()?);
if let Some(links) = unit.pkg.manifest().links() {
cmd.env("CRAFT_MANIFEST_LINKS", links);
}
// Be sure to pass along all enabled features for this package, this is the
// last piece of statically known information that we have.
if let Some(features) = cx.resolve.features(unit.pkg.package_id()) {
for feat in features.iter() {
cmd.env(&format!("CRAFT_FEATURE_{}", super::envify(feat)), "1");
}
}
// Gather the set of native dependencies that this package has along with
// some other variables to close over.
//
// This information will be used at build-time later on to figure out which
// sorts of variables need to be discovered at that time.
let lib_deps = {
cx.dep_run_custom_build(unit)?
.iter()
.filter_map(|unit| {
if unit.profile.run_custom_build {
Some((unit.pkg.manifest().links().unwrap().to_string(), unit.pkg.package_id().clone()))
} else {
None
}
})
.collect::<Vec<_>>()
};
let pkg_name = unit.pkg.to_string();
let build_state = cx.build_state.clone();
let id = unit.pkg.package_id().clone();
let output_file = build_output.parent().unwrap().join("output");
let all = (id.clone(), pkg_name.clone(), build_state.clone(), output_file.clone());
let build_scripts = super::load_build_deps(cx, unit);
let kind = unit.kind;
// Check to see if the build script has already run, and if it has keep
// track of whether it has told us about some explicit dependencies
let prev_output = BuildOutput::parse_file(&output_file, &pkg_name).ok();
let rerun_if_changed = match prev_output {
Some(ref prev) => prev.rerun_if_changed.clone(),
None => Vec::new(),
};
cx.build_explicit_deps.insert(*unit, (output_file.clone(), rerun_if_changed));
fs::create_dir_all(&cx.layout(&host_unit).build(unit.pkg))?;
fs::create_dir_all(&cx.layout(unit).build(unit.pkg))?;
// Prepare the unit of "dirty work" which will actually run the custom build
// command.
//
// Note that this has to do some extra work just before running the command
// to determine extra environment variables and such.
let dirty = Work::new(move |state| {
// Make sure that OUT_DIR exists.
//
// If we have an old build directory, then just move it into place,
// otherwise create it!
if fs::metadata(&build_output).is_err() {
fs::create_dir(&build_output)
.chain_error(|| internal("failed to create script output directory for build command"))?;
}
// For all our native lib dependencies, pick up their metadata to pass
// along to this custom build command. We're also careful to augment our
// dynamic library search path in case the build script depended on any
// native dynamic libraries.
{
let build_state = build_state.outputs.lock().unwrap();
for (name, id) in lib_deps {
let key = (id.clone(), kind);
let state = build_state.get(&key)
.chain_error(|| {
internal(format!("failed to locate build state for env vars: {}/{:?}",
id,
kind))
})?;
let data = &state.metadata;
for &(ref key, ref value) in data.iter() {
cmd.env(&format!("DEP_{}_{}", super::envify(&name), super::envify(key)),
value);
}
}
if let Some(build_scripts) = build_scripts {
super::add_plugin_deps(&mut cmd, &build_state, &build_scripts)?;
}
}
// And now finally, run the build command itself!
state.running(&cmd);
let output = cmd.exec_with_streaming(&mut |out_line| {
state.stdout(out_line);
Ok(())
},
&mut |err_line| {
state.stderr(err_line);
Ok(())
})
.map_err(|mut e| {
e.desc = format!("failed to run custom build command for `{}`\n{}",
pkg_name,
e.desc);
Human(e)
})?;
paths::write(&output_file, &output.stdout)?;
// After the build command has finished running, we need to be sure to
// remember all of its output so we can later discover precisely what it
// was, even if we don't run the build command again (due to freshness).
//
// This is also the location where we provide feedback into the build
// state informing what variables were discovered via our script as
// well.
let parsed_output = BuildOutput::parse(&output.stdout, &pkg_name)?;
build_state.insert(id, kind, parsed_output);
Ok(())
});
// Now that we've prepared our work-to-do, we need to prepare the fresh work
// itself to run when we actually end up just discarding what we calculated
// above.
let fresh = Work::new(move |_tx| {
let (id, pkg_name, build_state, output_file) = all;
let output = match prev_output {
Some(output) => output,
None => BuildOutput::parse_file(&output_file, &pkg_name)?,
};
build_state.insert(id, kind, output);
Ok(())
});
Ok((dirty, fresh))
}
impl BuildState {
pub fn new(config: &super::BuildConfig) -> BuildState {
let mut overrides = HashMap::new();
let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
for ((name, output), kind) in i1.chain(i2) {
overrides.insert((name.clone(), kind), output.clone());
}
BuildState {
outputs: Mutex::new(HashMap::new()),
overrides: overrides,
}
}
fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
self.outputs.lock().unwrap().insert((id, kind), output);
}
fn has_override(&self, unit: &Unit) -> bool {
let key = unit.pkg.manifest().links().map(|l| (l.to_string(), unit.kind));
match key.and_then(|k| self.overrides.get(&k)) {
Some(output) => {
self.insert(unit.pkg.package_id().clone(), unit.kind, output.clone());
true
}
None => false,
}
}
}
impl BuildOutput {
pub fn parse_file(path: &Path, pkg_name: &str) -> CraftResult<BuildOutput> {
let contents = paths::read_bytes(path)?;
BuildOutput::parse(&contents, pkg_name)
}
// Parses the output of a script.
// The `pkg_name` is used for error messages.
pub fn parse(input: &[u8], pkg_name: &str) -> CraftResult<BuildOutput> {
let mut library_paths = Vec::new();
let mut library_links = Vec::new();
let mut metadata = Vec::new();
let mut rerun_if_changed = Vec::new();
let mut warnings = Vec::new();
let whence = format!("build script of `{}`", pkg_name);
for line in input.split(|b| *b == b'\n') {
let line = match str::from_utf8(line) {
Ok(line) => line.trim(),
Err(..) => continue,
};
let mut iter = line.splitn(2, ':');
if iter.next() != Some("craft") {
// skip this line since it doesn't start with "craft:"
continue;
}
let data = match iter.next() {
Some(val) => val,
None => continue,
};
// getting the `key=value` part of the line
let mut iter = data.splitn(2, '=');
let key = iter.next();
let value = iter.next();
let (key, value) = match (key, value) {
(Some(a), Some(b)) => (a, b.trim_right()),
// line started with `craft:` but didn't match `key=value`
_ => bail!("Wrong output in {}: `{}`", whence, line),
};
match key {
"cc-flags" => {
let (libs, links) = BuildOutput::parse_cc_flags(value, &whence)?;
library_links.extend(links.into_iter());
library_paths.extend(libs.into_iter());
}
"cc-link-lib" => library_links.push(value.to_string()),
"cc-link-search" => library_paths.push(PathBuf::from(value)),
"warning" => warnings.push(value.to_string()),
"rerun-if-changed" => rerun_if_changed.push(value.to_string()),
_ => metadata.push((key.to_string(), value.to_string())),
}
}
Ok(BuildOutput {
library_paths: library_paths,
library_links: library_links,
metadata: metadata,
rerun_if_changed: rerun_if_changed,
warnings: warnings,
})
}
pub fn parse_cc_flags(value: &str, whence: &str) -> CraftResult<(Vec<PathBuf>, Vec<String>)> {
let value = value.trim();
let mut flags_iter = value.split(|c: char| c.is_whitespace())
.filter(|w| w.chars().any(|c| !c.is_whitespace()));
let (mut library_links, mut library_paths) = (Vec::new(), Vec::new());
loop {
let flag = match flags_iter.next() {
Some(f) => f,
None => break,
};
if flag != "-l" && flag != "-L" {
bail!("Only `-l` and `-L` flags are allowed in {}: `{}`",
whence,
value)
}
let value = match flags_iter.next() {
Some(v) => v,
None => {
bail!("Flag in cc-flags has no value in {}: `{}`",
whence,
value)
}
};
match flag {
"-l" => library_links.push(value.to_string()),
"-L" => library_paths.push(PathBuf::from(value)),
// was already checked above
_ => bail!("only -l and -L flags are allowed"),
};
}
Ok((library_paths, library_links))
}
}
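// Illustration (not part of this file): what `BuildOutput::parse` extracts from a
// few typical `craft:`-prefixed lines printed by a build script.
#[test]
fn parse_example_output() {
    let out = b"craft:cc-link-lib=git2\n\
                craft:cc-link-search=/usr/local/lib\n\
                craft:rerun-if-changed=build.rs\n\
                craft:warning=example warning\n";
    let parsed = BuildOutput::parse(out, "example-pkg").unwrap();
    assert_eq!(parsed.library_links, vec!["git2".to_string()]);
    assert_eq!(parsed.library_paths, vec![PathBuf::from("/usr/local/lib")]);
    assert_eq!(parsed.rerun_if_changed, vec!["build.rs".to_string()]);
    assert_eq!(parsed.warnings, vec!["example warning".to_string()]);
}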
/// Compute the `build_scripts` map in the `Context` which tracks what build
/// scripts each package depends on.
///
/// The global `build_scripts` map lists for all (package, kind) tuples what set
/// of packages' build script outputs must be considered. For example this lists
/// all dependencies' `-L` flags which need to be propagated transitively.
///
/// The given set of targets to this function is the initial set of
/// targets/profiles which are being built.
pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CraftResult<()> {
let mut ret = HashMap::new();
for unit in units {
build(&mut ret, cx, unit)?;
}
cx.build_scripts.extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v))));
return Ok(());
// Recursive function to build up the map we're constructing. This function
// memoizes all of its return values as it goes along.
fn build<'a, 'b, 'cfg>(out: &'a mut HashMap<Unit<'b>, BuildScripts>,
cx: &Context<'b, 'cfg>,
unit: &Unit<'b>)
-> CraftResult<&'a BuildScripts> {
// Do a quick pre-flight check to see if we've already calculated the
// set of dependencies.
if out.contains_key(unit) {
return Ok(&out[unit]);
}
let mut ret = BuildScripts::default();
if !unit.target.is_custom_build() && unit.pkg.has_custom_build() {
add_to_link(&mut ret, unit.pkg.package_id(), unit.kind);
}
for unit in cx.dep_targets(unit)?.iter() {
let dep_scripts = build(out, cx, unit)?;
if unit.target.for_host() {
ret.plugins.extend(dep_scripts.to_link
.iter()
.map(|p| &p.0)
.cloned());
} else if unit.target.linkable() {
for &(ref pkg, kind) in dep_scripts.to_link.iter() {
add_to_link(&mut ret, pkg, kind);
}
}
}
let prev = out.entry(*unit).or_insert(BuildScripts::default());
for (pkg, kind) in ret.to_link {
add_to_link(prev, &pkg, kind);
}
prev.plugins.extend(ret.plugins);
Ok(prev)
}
// When adding an entry to 'to_link' we only actually push it on if the
// script hasn't seen it yet (e.g. we don't push on duplicates).
fn add_to_link(scripts: &mut BuildScripts, pkg: &PackageId, kind: Kind) {
if scripts.seen_to_link.insert((pkg.clone(), kind)) {
scripts.to_link.push((pkg.clone(), kind));
}
}
}
wrapper.rs
fn to_unsafe(&self) -> UnsafeNode {
(self.0 as *const _ as usize, 0)
}
unsafe fn from_unsafe(n: &UnsafeNode) -> Self {
GeckoNode(&*(n.0 as *mut RawGeckoNode))
}
fn children(self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
let maybe_iter = unsafe { Gecko_MaybeCreateStyleChildrenIterator(self.0) };
if let Some(iter) = maybe_iter.into_owned_opt() {
LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter))
} else {
LayoutIterator(GeckoChildrenIterator::Current(self.first_child()))
}
}
fn opaque(&self) -> OpaqueNode {
let ptr: usize = self.0 as *const _ as usize;
OpaqueNode(ptr)
}
fn debug_id(self) -> usize {
unimplemented!()
}
fn as_element(&self) -> Option<GeckoElement<'ln>> {
if self.is_element() {
unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
} else {
None
}
}
fn can_be_fragmented(&self) -> bool {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
false
}
unsafe fn set_can_be_fragmented(&self, _value: bool) {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
}
fn parent_node(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentNode(self.0).map(GeckoNode) }
}
fn is_in_doc(&self) -> bool {
unsafe { bindings::Gecko_IsInDocument(self.0) }
}
fn needs_dirty_on_viewport_size_changed(&self) -> bool {
// Gecko's node doesn't have the DIRTY_ON_VIEWPORT_SIZE_CHANGE flag,
// so we force them to be dirtied on viewport size change, regardless if
// they use viewport percentage size or not.
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
true
}
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
unsafe fn set_dirty_on_viewport_size_changed(&self) {}
}
/// A wrapper on top of two kind of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
/// A simple iterator that tracks the current node being iterated and
/// replaces it with the next sibling when requested.
Current(Option<GeckoNode<'a>>),
/// A Gecko-implemented iterator we need to drop appropriately.
GeckoIterator(bindings::StyleChildrenIteratorOwned),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
fn drop(&mut self) {
if let GeckoChildrenIterator::GeckoIterator(ref it) = *self {
unsafe {
Gecko_DropStyleChildrenIterator(ptr::read(it as *const _));
}
}
}
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
type Item = GeckoNode<'a>;
fn next(&mut self) -> Option<GeckoNode<'a>> {
match *self {
GeckoChildrenIterator::Current(curr) => {
let next = curr.and_then(|node| node.next_sibling());
*self = GeckoChildrenIterator::Current(next);
curr
},
GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
Gecko_GetNextStyleChild(it).map(GeckoNode)
}
}
}
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "<{}", self.get_local_name()));
if let Some(id) = self.get_id() {
try!(write!(f, " id={}", id));
}
write!(f, "> ({:#x})", self.as_node().opaque().0)
}
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute(value: &str) -> PropertyDeclarationBlock {
// FIXME(bholley): Real base URL and error reporter.
let base_url = &*DUMMY_BASE_URL;
// FIXME(heycam): Needs real ParserContextExtraData so that URLs parse
// properly.
let extra_data = ParserContextExtraData::default();
parse_style_attribute(value, &base_url, Box::new(StdoutErrorReporter), extra_data)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Clear the element data for a given element.
pub fn clear_data(&self) {
let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
/// Ensures the element has data, returning the existing data or allocating
/// it.
///
/// Only safe to call with exclusive access to the element, given otherwise
/// it could race to allocate and leak.
pub unsafe fn ensure_data(&self) -> &AtomicRefCell<ElementData> {
match self.get_data() {
Some(x) => x,
None => {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::new(None))));
self.0.mServoData.set(ptr);
unsafe { &* ptr }
},
}
}
/// Creates a blank snapshot for this element.
pub fn create_snapshot(&self) -> Snapshot {
Snapshot::new(*self)
}
}
lazy_static! {
/// A dummy base url in order to get it where we don't have any available.
///
/// We need to get rid of this sooner than later.
pub static ref DUMMY_BASE_URL: ServoUrl = {
ServoUrl::parse("http://www.example.org").unwrap()
};
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
fn as_node(&self) -> Self::ConcreteNode {
unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
}
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetServoDeclarationBlock(self.0) };
declarations.map(|s| s.as_arc_opt()).unwrap_or(None)
}
fn get_animation_rules(&self, pseudo: Option<&PseudoElement>) -> AnimationRules {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr()).unwrap_or(ptr::null_mut());
unsafe {
AnimationRules(
Gecko_GetAnimationRule(self.0, atom_ptr, CascadeLevel::Animations).into_arc_opt(),
Gecko_GetAnimationRule(self.0, atom_ptr, CascadeLevel::Transitions).into_arc_opt())
}
}
fn get_state(&self) -> ElementState {
unsafe {
ElementState::from_bits_truncate(Gecko_ElementState(self.0))
}
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
namespace.0.as_ptr(),
attr.as_ptr())
}
}
#[inline]
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, val: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
namespace.0.as_ptr(),
attr.as_ptr(),
val.as_ptr(),
/* ignoreCase = */ false)
}
}
fn existing_style_for_restyle_damage<'a>(&'a self,
current_cv: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a nsStyleContext> {
if current_cv.is_none() {
// Don't bother in doing an ffi call to get null back.
return None;
}
unsafe {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr())
.unwrap_or(ptr::null_mut());
let context_ptr = Gecko_GetStyleContext(self.as_node().0, atom_ptr);
context_ptr.as_ref()
}
}
fn has_dirty_descendants(&self) -> bool {
self.flags() & (NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_dirty_descendants(&self) {
self.unset_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
fn skip_root_and_item_based_display_fixup(&self) -> bool {
// We don't want to fix up display values of native anonymous content.
// Additionally, we want to skip root-based display fixup for document
// level native anonymous content subtree roots, since they're not
// really roots from the style fixup perspective. Checking that we
// are NAC handles both cases.
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
impl<'le> PresentationalHintsSynthetizer for GeckoElement<'le> {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, _hints: &mut V)
where V: Push<ApplicableDeclarationBlock>,
{
// FIXME(bholley) - Need to implement this.
}
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
fn parent_element(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentElement(self.0).map(GeckoElement) }
}
fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
// XXX(emilio): Implement this properly.
false
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName.raw())
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
match pseudo_class {
// https://github.com/servo/servo/issues/8718
NonTSPseudoClass::AnyLink => unsafe { Gecko_IsLink(self.0) },
NonTSPseudoClass::Link => unsafe { Gecko_IsUnvisitedLink(self.0) },
NonTSPseudoClass::Visited => unsafe { Gecko_IsVisitedLink(self.0) },
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus |
NonTSPseudoClass::Hover |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::ReadWrite |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::Indeterminate => {
self.get_state().contains(pseudo_class.state_flag())
},
NonTSPseudoClass::ReadOnly => {
!self.get_state().contains(pseudo_class.state_flag())
}
NonTSPseudoClass::MozBrowserFrame => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
}
}
}
fn get_id(&self) -> Option<Atom> {
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0,
atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn has_class(&self, name: &Atom) -> bool {
snapshot_helpers::has_class(self.0,
name,
Gecko_ClassOrClassList)
}
fn each_class<F>(&self, callback: F)
where F: FnMut(&Atom)
{
snapshot_helpers::each_class(self.0,
callback,
Gecko_ClassOrClassList)
}
fn is_html_element_in_html_document(&self) -> bool {
unsafe {
Gecko_IsHTMLElementInHTMLDocument(self.0)
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait AttrSelectorHelpers {
/// Returns the namespace of the selector, or null otherwise.
fn ns_or_null(&self) -> *mut nsIAtom;
/// Returns the proper selector name depending on whether the requesting
/// element is an HTML element in an HTML document or not.
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom;
}
impl AttrSelectorHelpers for AttrSelector<SelectorImpl> {
fn ns_or_null(&self) -> *mut nsIAtom {
match self.namespace {
NamespaceConstraint::Any => ptr::null_mut(),
NamespaceConstraint::Specific(ref ns) => ns.url.0.as_ptr(),
}
}
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom {
if is_html_element_in_html_document {
self.lower_name.as_ptr()
} else {
self.name.as_ptr()
}
}
}
impl<'le> ::selectors::MatchAttr for GeckoElement<'le> {
type Impl = SelectorImpl;
fn match_attr_has(&self, attr: &AttrSelector<Self::Impl>) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()))
}
}
fn match_attr_equals(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_equals_ignore_ascii_case(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr(),
/* ignoreCase = */ false)
}
}
fn match_attr_includes(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrIncludes(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
fn match_attr_dash(&self, attr: &AttrSelector<Self::Impl>, value: &Atom) -> bool {
unsafe {
// The binding name below is assumed by analogy with the other match_attr_* methods.
bindings::Gecko_AttrDashEquals(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()),
value.as_ptr())
}
}
}
wrapper.rs
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(el) = self.as_element() {
el.fmt(f)
} else {
if self.is_text_node() {
write!(f, "<text node> ({:#x})", self.opaque().0)
} else {
write!(f, "<non-text node> ({:#x})", self.opaque().0)
}
}
}
}
impl<'ln> GeckoNode<'ln> {
fn from_content(content: &'ln nsIContent) -> Self {
GeckoNode(&content._base)
}
fn node_info(&self) -> &structs::NodeInfo {
debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
unsafe { &*self.0.mNodeInfo.mRawPtr }
}
fn first_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
}
fn last_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
}
fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
}
fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
}
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
fn is_element(&self) -> bool {
use gecko_bindings::structs::nsINode_BooleanFlag;
self.0.mBoolFlags & (1u32 << nsINode_BooleanFlag::NodeIsElement as u32) != 0
}
fn is_text_node(&self) -> bool {
// This is a DOM constant that isn't going to change.
const TEXT_NODE: u16 = 3;
self.node_info().mInner.mNodeType == TEXT_NODE
}
}
impl<'ln> TNode for GeckoNode<'ln> {
type ConcreteElement = GeckoElement<'ln>;
type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;
fn to_unsafe(&self) -> UnsafeNode {
(self.0 as *const _ as usize, 0)
}
unsafe fn from_unsafe(n: &UnsafeNode) -> Self {
GeckoNode(&*(n.0 as *mut RawGeckoNode))
}
fn children(self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
let maybe_iter = unsafe { Gecko_MaybeCreateStyleChildrenIterator(self.0) };
if let Some(iter) = maybe_iter.into_owned_opt() {
LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter))
} else {
LayoutIterator(GeckoChildrenIterator::Current(self.first_child()))
}
}
fn opaque(&self) -> OpaqueNode {
let ptr: usize = self.0 as *const _ as usize;
OpaqueNode(ptr)
}
fn debug_id(self) -> usize {
unimplemented!()
}
fn as_element(&self) -> Option<GeckoElement<'ln>> {
if self.is_element() {
unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
} else {
None
}
}
fn can_be_fragmented(&self) -> bool {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
false
}
unsafe fn set_can_be_fragmented(&self, _value: bool) {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
}
fn parent_node(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentNode(self.0).map(GeckoNode) }
}
fn is_in_doc(&self) -> bool {
unsafe { bindings::Gecko_IsInDocument(self.0) }
}
fn needs_dirty_on_viewport_size_changed(&self) -> bool {
// Gecko's node doesn't have the DIRTY_ON_VIEWPORT_SIZE_CHANGE flag,
// so we force them to be dirtied on viewport size change, regardless if
// they use viewport percentage size or not.
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
true
}
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
unsafe fn set_dirty_on_viewport_size_changed(&self) {}
}
/// A wrapper on top of two kind of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
/// A simple iterator that tracks the current node being iterated and
/// replaces it with the next sibling when requested.
Current(Option<GeckoNode<'a>>),
/// A Gecko-implemented iterator we need to drop appropriately.
GeckoIterator(bindings::StyleChildrenIteratorOwned),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
fn drop(&mut self) {
if let GeckoChildrenIterator::GeckoIterator(ref it) = *self {
unsafe {
Gecko_DropStyleChildrenIterator(ptr::read(it as *const _));
}
}
}
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
type Item = GeckoNode<'a>;
fn next(&mut self) -> Option<GeckoNode<'a>> {
match *self {
GeckoChildrenIterator::Current(curr) => {
let next = curr.and_then(|node| node.next_sibling());
*self = GeckoChildrenIterator::Current(next);
curr
},
GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
Gecko_GetNextStyleChild(it).map(GeckoNode)
}
}
}
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "<{}", self.get_local_name()));
if let Some(id) = self.get_id() {
try!(write!(f, " id={}", id));
}
write!(f, "> ({:#x})", self.as_node().opaque().0)
}
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute(value: &str) -> PropertyDeclarationBlock {
// FIXME(bholley): Real base URL and error reporter.
let base_url = &*DUMMY_BASE_URL;
// FIXME(heycam): Needs real ParserContextExtraData so that URLs parse
// properly.
let extra_data = ParserContextExtraData::default();
parse_style_attribute(value, &base_url, Box::new(StdoutErrorReporter), extra_data)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Clear the element data for a given element.
pub fn clear_data(&self) {
let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
/// Ensures the element has data, returning the existing data or allocating
/// it.
///
/// Only safe to call with exclusive access to the element, given otherwise
/// it could race to allocate and leak.
pub unsafe fn ensure_data(&self) -> &AtomicRefCell<ElementData> {
match self.get_data() {
Some(x) => x,
None => {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::new(None))));
self.0.mServoData.set(ptr);
unsafe { &* ptr }
},
}
}
/// Creates a blank snapshot for this element.
pub fn create_snapshot(&self) -> Snapshot {
Snapshot::new(*self)
}
}
lazy_static! {
/// A dummy base url in order to get it where we don't have any available.
///
/// We need to get rid of this sooner than later.
pub static ref DUMMY_BASE_URL: ServoUrl = {
ServoUrl::parse("http://www.example.org").unwrap()
};
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
fn as_node(&self) -> Self::ConcreteNode {
unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
}
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetServoDeclarationBlock(self.0) };
declarations.map(|s| s.as_arc_opt()).unwrap_or(None)
}
fn get_animation_rules(&self, pseudo: Option<&PseudoElement>) -> AnimationRules {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr()).unwrap_or(ptr::null_mut());
unsafe {
AnimationRules(
Gecko_GetAnimationRule(self.0, atom_ptr, CascadeLevel::Animations).into_arc_opt(),
Gecko_GetAnimationRule(self.0, atom_ptr, CascadeLevel::Transitions).into_arc_opt())
}
}
fn get_state(&self) -> ElementState {
unsafe {
ElementState::from_bits_truncate(Gecko_ElementState(self.0))
}
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
namespace.0.as_ptr(),
attr.as_ptr())
}
}
#[inline]
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, val: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
namespace.0.as_ptr(),
attr.as_ptr(),
val.as_ptr(),
/* ignoreCase = */ false)
}
}
fn existing_style_for_restyle_damage<'a>(&'a self,
current_cv: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a nsStyleContext> {
if current_cv.is_none() {
// Don't bother in doing an ffi call to get null back.
return None;
}
unsafe {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr())
.unwrap_or(ptr::null_mut());
let context_ptr = Gecko_GetStyleContext(self.as_node().0, atom_ptr);
context_ptr.as_ref()
}
}
fn has_dirty_descendants(&self) -> bool {
self.flags() & (NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_dirty_descendants(&self) {
self.unset_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
fn skip_root_and_item_based_display_fixup(&self) -> bool {
// We don't want to fix up display values of native anonymous content.
// Additionally, we want to skip root-based display fixup for document
// level native anonymous content subtree roots, since they're not
// really roots from the style fixup perspective. Checking that we
// are NAC handles both cases.
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
impl<'le> PresentationalHintsSynthetizer for GeckoElement<'le> {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, _hints: &mut V)
where V: Push<ApplicableDeclarationBlock>,
{
// FIXME(bholley) - Need to implement this.
}
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
fn parent_element(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentElement(self.0).map(GeckoElement) }
}
fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
// XXX(emilio): Implement this properly.
false
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName.raw())
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
match pseudo_class {
// https://github.com/servo/servo/issues/8718
NonTSPseudoClass::AnyLink => unsafe { Gecko_IsLink(self.0) },
NonTSPseudoClass::Link => unsafe { Gecko_IsUnvisitedLink(self.0) },
NonTSPseudoClass::Visited => unsafe { Gecko_IsVisitedLink(self.0) },
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus |
NonTSPseudoClass::Hover |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::ReadWrite |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::Indeterminate => {
self.get_state().contains(pseudo_class.state_flag())
},
NonTSPseudoClass::ReadOnly => {
!self.get_state().contains(pseudo_class.state_flag())
}
NonTSPseudoClass::MozBrowserFrame => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
}
}
}
fn get_id(&self) -> Option<Atom> {
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0,
atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn has_class(&self, name: &Atom) -> bool {
snapshot_helpers::has_class(self.0,
name,
Gecko_ClassOrClassList)
}
fn each_class<F>(&self, callback: F)
where F: FnMut(&Atom)
{
snapshot_helpers::each_class(self.0,
callback,
Gecko_ClassOrClassList)
}
fn is_html_element_in_html_document(&self) -> bool {
unsafe {
Gecko_IsHTMLElementInHTMLDocument(self.0)
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait AttrSelectorHelpers {
/// Returns the namespace of the selector, or null otherwise.
fn ns_or_null(&self) -> *mut nsIAtom;
/// Returns the proper selector name depending on whether the requesting
/// element is an HTML element in an HTML document or not.
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom;
}
impl AttrSelectorHelpers for AttrSelector<SelectorImpl> {
fn ns_or_null(&self) -> *mut nsIAtom {
match self.namespace {
NamespaceConstraint::Any => ptr::null_mut(),
NamespaceConstraint::Specific(ref ns) => ns.url.0.as_ptr(),
}
}
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom {
if is_html_element_in_html_document {
self.lower_name.as_ptr()
} else {
self.name.as_ptr()
}
}
}
impl<'le> ::selectors::MatchAttr for GeckoElement<'le> {
type Impl = SelectorImpl;
fn match_attr_has(&self, attr: &AttrSelector<Self::Impl>) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
attr.ns_or_null(),
attr.select_name(self.is_html_element_in_html_document()))
}
}
}
wrapper.rs
#[derive(Clone, Copy)]
pub struct GeckoNode<'ln>(pub &'ln RawGeckoNode);
impl<'ln> fmt::Debug for GeckoNode<'ln> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(el) = self.as_element() {
el.fmt(f)
} else {
if self.is_text_node() {
write!(f, "<text node> ({:#x})", self.opaque().0)
} else {
write!(f, "<non-text node> ({:#x})", self.opaque().0)
}
}
}
}
impl<'ln> GeckoNode<'ln> {
fn from_content(content: &'ln nsIContent) -> Self {
GeckoNode(&content._base)
}
fn node_info(&self) -> &structs::NodeInfo {
debug_assert!(!self.0.mNodeInfo.mRawPtr.is_null());
unsafe { &*self.0.mNodeInfo.mRawPtr }
}
fn first_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mFirstChild.as_ref().map(GeckoNode::from_content) }
}
fn last_child(&self) -> Option<GeckoNode<'ln>> {
unsafe { Gecko_GetLastChild(self.0).map(GeckoNode) }
}
fn prev_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mPreviousSibling.as_ref().map(GeckoNode::from_content) }
}
fn next_sibling(&self) -> Option<GeckoNode<'ln>> {
unsafe { self.0.mNextSibling.as_ref().map(GeckoNode::from_content) }
}
}
impl<'ln> NodeInfo for GeckoNode<'ln> {
fn is_element(&self) -> bool {
use gecko_bindings::structs::nsINode_BooleanFlag;
self.0.mBoolFlags & (1u32 << nsINode_BooleanFlag::NodeIsElement as u32) != 0
}
fn is_text_node(&self) -> bool {
// This is a DOM constant that isn't going to change.
const TEXT_NODE: u16 = 3;
self.node_info().mInner.mNodeType == TEXT_NODE
}
}
impl<'ln> TNode for GeckoNode<'ln> {
type ConcreteElement = GeckoElement<'ln>;
type ConcreteChildrenIterator = GeckoChildrenIterator<'ln>;
fn to_unsafe(&self) -> UnsafeNode {
(self.0 as *const _ as usize, 0)
}
unsafe fn from_unsafe(n: &UnsafeNode) -> Self {
GeckoNode(&*(n.0 as *mut RawGeckoNode))
}
fn children(self) -> LayoutIterator<GeckoChildrenIterator<'ln>> {
let maybe_iter = unsafe { Gecko_MaybeCreateStyleChildrenIterator(self.0) };
if let Some(iter) = maybe_iter.into_owned_opt() {
LayoutIterator(GeckoChildrenIterator::GeckoIterator(iter))
} else {
LayoutIterator(GeckoChildrenIterator::Current(self.first_child()))
}
}
fn opaque(&self) -> OpaqueNode {
let ptr: usize = self.0 as *const _ as usize;
OpaqueNode(ptr)
}
fn debug_id(self) -> usize {
unimplemented!()
}
fn as_element(&self) -> Option<GeckoElement<'ln>> {
if self.is_element() {
unsafe { Some(GeckoElement(&*(self.0 as *const _ as *const RawGeckoElement))) }
} else {
None
}
}
fn can_be_fragmented(&self) -> bool {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
false
}
unsafe fn set_can_be_fragmented(&self, _value: bool) {
// FIXME(SimonSapin): Servo uses this to implement CSS multicol / fragmentation
// Maybe this isn’t useful for Gecko?
}
fn parent_node(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentNode(self.0).map(GeckoNode) }
}
fn is_in_doc(&self) -> bool {
unsafe { bindings::Gecko_IsInDocument(self.0) }
}
fn needs_dirty_on_viewport_size_changed(&self) -> bool {
// Gecko's node doesn't have the DIRTY_ON_VIEWPORT_SIZE_CHANGE flag,
// so we force them to be dirtied on viewport size change, regardless if
// they use viewport percentage size or not.
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
true
}
// TODO(shinglyu): implement this in Gecko: https://github.com/servo/servo/pull/11890
unsafe fn set_dirty_on_viewport_size_changed(&self) {}
}
/// A wrapper on top of two kind of iterators, depending on the parent being
/// iterated.
///
/// We generally iterate children by traversing the light-tree siblings of the
/// first child like Servo does.
///
/// However, for nodes with anonymous children, we use a custom (heavier-weight)
/// Gecko-implemented iterator.
///
/// FIXME(emilio): If we take into account shadow DOM, we're going to need the
/// flat tree pretty much always. We can try to optimize the case where there's
/// no shadow root sibling, probably.
pub enum GeckoChildrenIterator<'a> {
/// A simple iterator that tracks the current node being iterated and
/// replaces it with the next sibling when requested.
Current(Option<GeckoNode<'a>>),
/// A Gecko-implemented iterator we need to drop appropriately.
GeckoIterator(bindings::StyleChildrenIteratorOwned),
}
impl<'a> Drop for GeckoChildrenIterator<'a> {
fn drop(&mut self) {
if let GeckoChildrenIterator::GeckoIterator(ref it) = *self {
unsafe {
Gecko_DropStyleChildrenIterator(ptr::read(it as *const _));
}
}
}
}
impl<'a> Iterator for GeckoChildrenIterator<'a> {
type Item = GeckoNode<'a>;
fn next(&mut self) -> Option<GeckoNode<'a>> {
match *self {
GeckoChildrenIterator::Current(curr) => {
let next = curr.and_then(|node| node.next_sibling());
*self = GeckoChildrenIterator::Current(next);
curr
},
GeckoChildrenIterator::GeckoIterator(ref mut it) => unsafe {
Gecko_GetNextStyleChild(it).map(GeckoNode)
}
}
}
}
/// A simple wrapper over a non-null Gecko `Element` pointer.
#[derive(Clone, Copy)]
pub struct GeckoElement<'le>(pub &'le RawGeckoElement);
impl<'le> fmt::Debug for GeckoElement<'le> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "<{}", self.get_local_name()));
if let Some(id) = self.get_id() {
try!(write!(f, " id={}", id));
}
write!(f, "> ({:#x})", self.as_node().opaque().0)
}
}
impl<'le> GeckoElement<'le> {
/// Parse the style attribute of an element.
pub fn parse_style_attribute(value: &str) -> PropertyDeclarationBlock {
// FIXME(bholley): Real base URL and error reporter.
let base_url = &*DUMMY_BASE_URL;
// FIXME(heycam): Needs real ParserContextExtraData so that URLs parse
// properly.
let extra_data = ParserContextExtraData::default();
parse_style_attribute(value, &base_url, Box::new(StdoutErrorReporter), extra_data)
}
fn flags(&self) -> u32 {
self.raw_node()._base._base_1.mFlags
}
fn raw_node(&self) -> &RawGeckoNode {
&(self.0)._base._base._base
}
// FIXME: We can implement this without OOL calls, but we can't easily given
// GeckoNode is a raw reference.
//
// We can use a Cell<T>, but that's a bit of a pain.
fn set_flags(&self, flags: u32) {
unsafe { Gecko_SetNodeFlags(self.as_node().0, flags) }
}
fn unset_flags(&self, flags: u32) {
unsafe { Gecko_UnsetNodeFlags(self.as_node().0, flags) }
}
/// Clear the element data for a given element.
pub fn clear_data(&self) {
let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
/// Ensures the element has data, returning the existing data or allocating
/// it.
///
/// Only safe to call with exclusive access to the element, given otherwise
/// it could race to allocate and leak.
pub unsafe fn ensure_data(&self) -> &AtomicRefCell<ElementData> {
match self.get_data() {
Some(x) => x,
None => {
debug!("Creating ElementData for {:?}", self);
let ptr = Box::into_raw(Box::new(AtomicRefCell::new(ElementData::new(None))));
self.0.mServoData.set(ptr);
unsafe { &* ptr }
},
}
}
/// Creates a blank snapshot for this element.
pub fn create_snapshot(&self) -> Snapshot {
Snapshot::new(*self)
}
}
lazy_static! {
/// A dummy base url in order to get it where we don't have any available.
///
/// We need to get rid of this sooner than later.
pub static ref DUMMY_BASE_URL: ServoUrl = {
ServoUrl::parse("http://www.example.org").unwrap()
};
}
impl<'le> TElement for GeckoElement<'le> {
type ConcreteNode = GeckoNode<'le>;
fn as_node(&self) -> Self::ConcreteNode {
unsafe { GeckoNode(&*(self.0 as *const _ as *const RawGeckoNode)) }
}
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>> {
let declarations = unsafe { Gecko_GetServoDeclarationBlock(self.0) };
declarations.map(|s| s.as_arc_opt()).unwrap_or(None)
}
fn get_animation_rules(&self, pseudo: Option<&PseudoElement>) -> AnimationRules {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr()).unwrap_or(ptr::null_mut());
unsafe {
AnimationRules(
Gecko_GetAnimationRule(self.0, atom_ptr, CascadeLevel::Animations).into_arc_opt(),
Gecko_GetAnimationRule(self.0, atom_ptr, CascadeLevel::Transitions).into_arc_opt())
}
}
fn get_state(&self) -> ElementState {
unsafe {
ElementState::from_bits_truncate(Gecko_ElementState(self.0))
}
}
#[inline]
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
namespace.0.as_ptr(),
attr.as_ptr())
}
}
#[inline]
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, val: &Atom) -> bool {
unsafe {
bindings::Gecko_AttrEquals(self.0,
namespace.0.as_ptr(),
attr.as_ptr(),
val.as_ptr(),
/* ignoreCase = */ false)
}
}
fn existing_style_for_restyle_damage<'a>(&'a self,
current_cv: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a nsStyleContext> {
if current_cv.is_none() {
// Don't bother in doing an ffi call to get null back.
return None;
}
unsafe {
let atom_ptr = pseudo.map(|p| p.as_atom().as_ptr())
.unwrap_or(ptr::null_mut());
let context_ptr = Gecko_GetStyleContext(self.as_node().0, atom_ptr);
context_ptr.as_ref()
}
}
fn has_dirty_descendants(&self) -> bool {
self.flags() & (NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32) != 0
}
unsafe fn set_dirty_descendants(&self) {
debug!("Setting dirty descendants: {:?}", self);
self.set_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
unsafe fn unset_dirty_descendants(&self) {
self.unset_flags(NODE_HAS_DIRTY_DESCENDANTS_FOR_SERVO as u32)
}
fn store_children_to_process(&self, _: isize) {
// This is only used for bottom-up traversal, and is thus a no-op for Gecko.
}
fn did_process_child(&self) -> isize {
panic!("Atomic child count not implemented in Gecko");
}
fn get_data(&self) -> Option<&AtomicRefCell<ElementData>> {
unsafe { self.0.mServoData.get().as_ref() }
}
fn skip_root_and_item_based_display_fixup(&self) -> bool {
// We don't want to fix up display values of native anonymous content.
// Additionally, we want to skip root-based display fixup for document
// level native anonymous content subtree roots, since they're not
// really roots from the style fixup perspective. Checking that we
// are NAC handles both cases.
self.flags() & (NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE as u32) != 0
}
}
impl<'le> PartialEq for GeckoElement<'le> {
fn eq(&self, other: &Self) -> bool {
self.0 as *const _ == other.0 as *const _
}
}
impl<'le> PresentationalHintsSynthetizer for GeckoElement<'le> {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, _hints: &mut V)
where V: Push<ApplicableDeclarationBlock>,
{
// FIXME(bholley) - Need to implement this.
}
}
impl<'le> ::selectors::Element for GeckoElement<'le> {
fn parent_element(&self) -> Option<Self> {
unsafe { bindings::Gecko_GetParentElement(self.0).map(GeckoElement) }
}
fn first_child_element(&self) -> Option<Self> {
let mut child = self.as_node().first_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.next_sibling();
}
None
}
fn last_child_element(&self) -> Option<Self> {
let mut child = self.as_node().last_child();
while let Some(child_node) = child {
if let Some(el) = child_node.as_element() {
return Some(el)
}
child = child_node.prev_sibling();
}
None
}
fn prev_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().prev_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.prev_sibling();
}
None
}
fn next_sibling_element(&self) -> Option<Self> {
let mut sibling = self.as_node().next_sibling();
while let Some(sibling_node) = sibling {
if let Some(el) = sibling_node.as_element() {
return Some(el)
}
sibling = sibling_node.next_sibling();
}
None
}
fn is_root(&self) -> bool {
unsafe {
Gecko_IsRootElement(self.0)
}
}
fn is_empty(&self) -> bool {
// XXX(emilio): Implement this properly.
false
}
fn get_local_name(&self) -> &WeakAtom {
unsafe {
WeakAtom::new(self.as_node().node_info().mInner.mName.raw())
}
}
fn get_namespace(&self) -> &WeakNamespace {
unsafe {
WeakNamespace::new(Gecko_Namespace(self.0))
}
}
fn match_non_ts_pseudo_class(&self, pseudo_class: NonTSPseudoClass) -> bool {
match pseudo_class {
// https://github.com/servo/servo/issues/8718
NonTSPseudoClass::AnyLink => unsafe { Gecko_IsLink(self.0) },
NonTSPseudoClass::Link => unsafe { Gecko_IsUnvisitedLink(self.0) },
NonTSPseudoClass::Visited => unsafe { Gecko_IsVisitedLink(self.0) },
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus |
NonTSPseudoClass::Hover |
NonTSPseudoClass::Enabled |
NonTSPseudoClass::Disabled |
NonTSPseudoClass::Checked |
NonTSPseudoClass::ReadWrite |
NonTSPseudoClass::Fullscreen |
NonTSPseudoClass::Indeterminate => {
self.get_state().contains(pseudo_class.state_flag())
},
NonTSPseudoClass::ReadOnly => {
!self.get_state().contains(pseudo_class.state_flag())
}
NonTSPseudoClass::MozBrowserFrame => unsafe {
Gecko_MatchesElement(pseudo_class.to_gecko_pseudoclasstype().unwrap(), self.0)
}
}
}
fn get_id(&self) -> Option<Atom> {
let ptr = unsafe {
bindings::Gecko_AtomAttrValue(self.0,
atom!("id").as_ptr())
};
if ptr.is_null() {
None
} else {
Some(Atom::from(ptr))
}
}
fn has_class(&self, name: &Atom) -> bool {
snapshot_helpers::has_class(self.0,
name,
Gecko_ClassOrClassList)
}
fn each_class<F>(&self, callback: F)
where F: FnMut(&Atom)
{
snapshot_helpers::each_class(self.0,
callback,
Gecko_ClassOrClassList)
}
fn is_html_element_in_html_document(&self) -> bool {
unsafe {
Gecko_IsHTMLElementInHTMLDocument(self.0)
}
}
}
/// A few helpers to help with attribute selectors and snapshotting.
pub trait AttrSelectorHelpers {
/// Returns the namespace of the selector, or null otherwise.
fn ns_or_null(&self) -> *mut nsIAtom;
/// Returns the proper selector name depending on whether the requesting
/// element is an HTML element in an HTML document or not.
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom;
}
impl AttrSelectorHelpers for AttrSelector<SelectorImpl> {
fn ns_or_null(&self) -> *mut nsIAtom {
match self.namespace {
NamespaceConstraint::Any => ptr::null_mut(),
NamespaceConstraint::Specific(ref ns) => ns.url.0.as_ptr(),
}
}
fn select_name(&self, is_html_element_in_html_document: bool) -> *mut nsIAtom {
if is_html_element_in_html_document {
self.lower_name.as_ptr()
} else {
self.name.as_ptr()
}
}
}
impl<'le> ::selectors::MatchAttr for GeckoElement<'le> {
type Impl = SelectorImpl;
fn match_attr_has(&self, attr: &AttrSelector<Self::Impl>) -> bool {
unsafe {
bindings::Gecko_HasAttr(self.0,
attr.ns_or_null(),
| let ptr = self.0.mServoData.get();
if !ptr.is_null() {
debug!("Dropping ElementData for {:?}", self);
let data = unsafe { Box::from_raw(self.0.mServoData.get()) };
self.0.mServoData.set(ptr::null_mut());
// Perform a mutable borrow of the data in debug builds. This
// serves as an assertion that there are no outstanding borrows
// when we destroy the data.
debug_assert!({ let _ = data.borrow_mut(); true });
}
}
| identifier_body |
lib.rs | // Copyright 2017 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
//! # Overview
//!
//! <b>cuckoo-miner</b> is a Rust wrapper around John Tromp's Cuckoo Miner
//! C implementations, intended primarily for use in the Grin MimbleWimble
//! blockchain development project. However, it is also suitable for use as
//! a standalone miner or by any other project needing to use the
//! cuckoo cycle proof of work. cuckoo-miner is plugin based, and provides
//! a high level interface to load and work with C mining implementations.
//!
//! A brief description of basic operations follows, as well as some
//! examples of how cuckoo miner should be called.
//!
//! ## Interfaces
//!
//! The library provides 2 high level interfaces:
//!
//! The [CuckooPluginManager](struct.CuckooPluginManager.html)
//! takes care of querying and loading plugins. A caller can provide a directory
//! for the plugin manager to scan, and the manager will load each plugin and
//! populate a [CuckooPluginCapabilities](struct.CuckooPluginCapabilities.html)
//! for each, which will contain a description of the plugin as well as any parameters
//! that can be configured.
//!
//! The [CuckooMiner](struct.CuckooMiner.html) struct provides a
//! high-level interface that a caller can use to load and run one or many
//! simultaneous plugin mining implementations.
//!
//! ## Operational Modes
//!
//! The miner can be run in either synchronous or asynchronous mode.
//!
//! Synchronous mode uses the [`mine`](struct.CuckooMiner.html#method.mine) function,
//! which takes a complete hash, processes it within the calling thread via the plugin's
//! [`call_cuckoo`](struct.PluginLibrary.html#method.call_cuckoo) function,
//! and returns the result.
//!
//! Asynchronous mode uses the [`notify`](struct.CuckooMiner.html#method.notify)
//! function, which takes the pre-nonce and
//! post-nonce parts of a block header, mutates the header internally with a nonce, and
//! inserts the resulting hash into the plugin's internal queue for processing.
//! Solutions are placed into an output queue, which the calling thread can
//! read asynchronously via a [job handle](struct.CuckooMinerJobHandle.html).
//!
//! Examples of using either mode follow:
//!
//! ## Example - Sync mode
//! ```
//! extern crate cuckoo_miner as cuckoo;
//! extern crate time;
//!
//! use std::path::PathBuf;
//!
//! let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
//! d.push("target/debug/plugins/");
//!
//! // load plugin manager
//! let mut plugin_manager = cuckoo::CuckooPluginManager::new().unwrap();
//! plugin_manager
//! .load_plugin_dir(String::from(d.to_str().unwrap()))
//! .expect("");
//!
//! // Load a single plugin using a filter
//! let caps=plugin_manager.get_available_plugins("lean_cpu_16").unwrap();
//! let mut config_vec=Vec::new();
//! let mut config = cuckoo::CuckooMinerConfig::new();
//! config.plugin_full_path = caps[0].full_path.clone();
//! config_vec.push(config);
//!
//! let duration_in_seconds=60;
//! let stat_check_interval = 3;
//! let deadline = time::get_time().sec + duration_in_seconds;
//! let mut next_stat_check = time::get_time().sec + stat_check_interval;
//!
//! let mut i=0;
//! println!("Test mining for {} seconds, looking for difficulty > 0", duration_in_seconds);
//! for c in config_vec.clone().into_iter(){
//! println!("Plugin (Sync Mode): {}", c.plugin_full_path);
//! }
//!
//! while time::get_time().sec < deadline {
//! let miner = cuckoo::CuckooMiner::new(config_vec.clone()).expect("");
//! //Mining with a dummy header here, but in reality this would be a passed in
//! //header hash
//! let mut header:[u8; 32] = [0;32];
//! let mut iterations=0;
//! let mut solution = cuckoo::CuckooMinerSolution::new();
//! loop {
//! header[0]=i;
//! //Mine on plugin loaded at index 0 (which should be the only one loaded in
//! //Sync mode)
//! let result = miner.mine(&header, &mut solution, 0).unwrap();
//! iterations+=1;
//! if result == true {
//! println!("Solution found after {} iterations: {}", i, solution);
//! println!("For hash: {:?}", header);
//! break;
//! }
//! if time::get_time().sec > deadline {
//! println!("Exiting after {} iterations", iterations);
//! break;
//! }
//! if time::get_time().sec >= next_stat_check {
//! let stats_vec=miner.get_stats(0).unwrap();
//! for s in stats_vec.into_iter() {
//! let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
//! let last_hashes_per_sec = 1.0 / last_solution_time_secs;
//! println!("Plugin 0 - Device {} ({}) - Last Solution time: {}; Solutions per second: {:.*}",
//! s.device_id, s.device_name, last_solution_time_secs, 3, last_hashes_per_sec);
//! }
//! next_stat_check = time::get_time().sec + stat_check_interval;
//! }
//! i+=1;
//! if i==255 {
//! i=0;
//! }
//! # break;
//! }
//! # break;
//! }
//! ```
//!
//! ## Example - Async mode
//! ```
//! extern crate cuckoo_miner as cuckoo;
//! extern crate time;
//!
//! use std::path::PathBuf;
//!
//! //Grin Pre and Post headers, into which a nonce is to be inserted for mutation
//! let SAMPLE_GRIN_PRE_HEADER_1:&str = "00000000000000118e0fe6bcfaa76c6795592339f27b6d330d8f9c4ac8e86171a66357d1\
//! d0fce808000000005971f14f0000000000000000000000000000000000000000000000000000000000000000\
//! 3e1fcdd453ce51ffbb16dd200aeb9ef7375aec196e97094868428a7325e4a19b00";
//!
//! let SAMPLE_GRIN_POST_HEADER_1:&str = "010a020364";
//!
//! let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
//! d.push("target/debug/plugins/");
//!
//! // load plugin manager
//! let mut plugin_manager = cuckoo::CuckooPluginManager::new().unwrap();
//! plugin_manager
//! .load_plugin_dir(String::from(d.to_str().unwrap()))
//! .expect("");
//!
//! // Load a single plugin using a filter
//! let caps=plugin_manager.get_available_plugins("lean_cpu_16").unwrap();
//! let mut config_vec=Vec::new();
//! let mut config = cuckoo::CuckooMinerConfig::new();
//! config.plugin_full_path = caps[0].full_path.clone();
//! config_vec.push(config);
//!
//! let duration_in_seconds=60;
//! let stat_check_interval = 3;
//! let deadline = time::get_time().sec + duration_in_seconds;
//! let mut next_stat_check = time::get_time().sec + stat_check_interval;
//! let mut stats_updated=false;
//!
//! while time::get_time().sec < deadline {
//!
//! println!("Test mining for {} seconds, looking for difficulty > 0", duration_in_seconds);
//! let mut i=0;
//! for c in config_vec.clone().into_iter(){
//! println!("Plugin {}: {}", i, c.plugin_full_path);
//! i+=1;
//! }
//!
//! // these always get consumed after a notify
//! let miner = cuckoo::CuckooMiner::new(config_vec.clone()).expect("");
//! let job_handle = miner.notify(1, SAMPLE_GRIN_PRE_HEADER_1, SAMPLE_GRIN_POST_HEADER_1, 0).unwrap();
//!
//! loop {
//! if let Some(s) = job_handle.get_solution() {
//! println!("Sol found: {}, {:?}", s.get_nonce_as_u64(), s);
//! // up to you to read it and check difficulty
//! continue;
//! }
//! if time::get_time().sec >= next_stat_check {
//! let mut sps_total=0.0;
//! for index in 0..config_vec.len() {
//! let stats_vec=job_handle.get_stats(index).unwrap();
//! for s in stats_vec.into_iter() {
//! let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
//! let last_hashes_per_sec = 1.0 / last_solution_time_secs;
//! println!("Plugin {} - Device {} ({}) - Last Solution time: {}; Solutions per second: {:.*}",
//! index,s.device_id, s.device_name, last_solution_time_secs, 3, last_hashes_per_sec);
//! if last_hashes_per_sec.is_finite() {
//! sps_total+=last_hashes_per_sec;
//! }
//! if last_solution_time_secs > 0.0 {
//! stats_updated = true;
//! }
//! i+=1;
//! }
//! }
//! println!("Total solutions per second: {}", sps_total);
//! next_stat_check = time::get_time().sec + stat_check_interval;
//! }
//! if time::get_time().sec > deadline {
//! println!("Stopping jobs and waiting for cleanup");
//! job_handle.stop_jobs();
//! break;
//! }
//! # break;
//! }
//! # break;
//! }
//! ```
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_mut)]
#![warn(missing_docs)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate regex;
extern crate rand;
extern crate byteorder;
extern crate crypto;
extern crate blake2_rfc as blake2;
extern crate libloading as libloading;
extern crate libc;
extern crate glob;
mod error;
mod miner;
mod manager;
mod cuckoo_sys;
pub use error::error::CuckooMinerError;
pub use miner::miner::{CuckooMinerConfig, CuckooMiner, CuckooMinerSolution, CuckooMinerJobHandle,
CuckooMinerDeviceStats};
pub use manager::manager::{CuckooPluginManager, CuckooPluginCapabilities};
pub use cuckoo_sys::manager::PluginLibrary; | // See the License for the specific language governing permissions and
// limitations under the License. | random_line_split |
mem_map.rs | const BOOT_ROM_START: u16 = 0x0000;
const BOOT_ROM_END: u16 = 0x00FF;
const CART_ROM_START: u16 = 0x0000;
const CART_ROM_END: u16 = 0x7FFF;
const CART_ENTRY_POINT: u16 = 0x0100;
const CART_HEADER_START: u16 = 0x0100;
const CART_HEADER_END: u16 = 0x014F;
const CART_FIXED_START: u16 = 0x0150;
const CART_FIXED_END: u16 = 0x3FFF;
const CART_SWITCH_START: u16 = 0x4000;
const CART_SWITCH_END: u16 = 0x7FFF;
const VIDEO_RAM_START: u16 = 0x8000;
const VIDEO_RAM_END: u16 = 0x9FFF;
const SOUND_REG_START: u16 = 0xFF10;
const SOUND_REG_END: u16 = 0xFF3F;
// http://gbdev.gg8.se/wiki/articles/Video_Display
const LCD_CONTROL_REGISTER: u16 = 0xFF40; // R/W
const LCD_STATUS_REGISTER: u16 = 0xFF41; // R/W
const SCROLL_Y: u16 = 0xFF42; // R/W
const SCROLL_X: u16 = 0xFF43; // R/W
const LCD_LY: u16 = 0xFF44; // R
const LCD_LYC: u16 = 0xFF45; // R/W
const LCD_WY: u16 = 0xFF4A; // R/W
const LCD_WX: u16 = 0xFF4B; // R/W
const BG_PALETTE_DATA: u16 = 0xFF47; // R/W
const OBJECT_PALETTE_0: u16 = 0xFF48; // R/W
const OBJECT_PALETTE_1: u16 = 0xFF49; // R/W
const HIGH_RAM_START: u16 = 0xFF80;
const HIGH_RAM_END: u16 = 0xFFFE;
pub fn map_addr(addr: u16) -> Addr {
match addr {
BOOT_ROM_START...BOOT_ROM_END => Addr::BootRom(addr - BOOT_ROM_START),
CART_HEADER_START...CART_HEADER_END => Addr::CartHeader(addr - CART_ROM_START),
CART_FIXED_START...CART_FIXED_END => Addr::CartFixed(addr - CART_ROM_START),
CART_SWITCH_START...CART_SWITCH_END => Addr::CartSwitch(addr - CART_ROM_START),
VIDEO_RAM_START...VIDEO_RAM_END => Addr::VideoRam(addr - VIDEO_RAM_START),
SOUND_REG_START...SOUND_REG_END => Addr::SoundRegister(addr - SOUND_REG_START),
HIGH_RAM_START...HIGH_RAM_END => Addr::HighRam(addr - HIGH_RAM_START),
_ => panic!("Unrecognised physical address: {:#x}", addr),
}
}
pub enum Addr {
BootRom(u16),
CartHeader(u16),
CartFixed(u16),
CartSwitch(u16),
VideoRam(u16),
SoundRegister(u16),
HighRam(u16),
}
pub fn cartridge_type(byte: u8) -> &'static str {
match byte {
0x00 => "ROM ONLY",
0x01 => "MBC1",
0x02 => "MBC1+RAM",
0x03 => "MBC1+RAM+BATTERY",
0x05 => "MBC2",
0x06 => "MBC2+BATTERY",
0x08 => "ROM+RAM",
0x09 => "ROM+RAM+BATTERY",
0x0B => "MMM01",
0x0C => "MMM01+RAM",
0x0D => "MMM01+RAM+BATTERY",
0x0F => "MBC3+TIMER+BATTERY",
0x10 => "MBC3+TIMER+RAM+BATTERY",
0x11 => "MBC3",
0x12 => "MBC3+RAM",
0x13 => "MBC3+RAM+BATTERY",
0x15 => "MBC4",
0x16 => "MBC4+RAM",
0x17 => "MBC4+RAM+BATTERY",
0x19 => "MBC5",
0x1A => "MBC5+RAM",
0x1B => "MBC5+RAM+BATTERY",
0x1C => "MBC5+RUMBLE",
0x1D => "MBC5+RUMBLE+RAM",
0x1E => "MBC5+RUMBLE+RAM+BATTERY",
0x20 => "MBC6",
0x22 => "MBC7+SENSOR+RUMBLE+RAM+BATTERY",
0xFC => "POCKET CAMERA",
0xFD => "BANDAI TAMA5",
0xFE => "HuC3",
0xFF => "HuC1+RAM+BATTERY",
_ => panic!("Unknown Cartridge Type"),
}
}
pub fn rom_size(byte: u8) -> &'static str |
pub fn ram_size(byte: u8) -> &'static str {
match byte {
0x00 => "None",
0x01 => "2 KBytes",
0x02 => "8 Kbytes",
0x03 => "32 KBytes (4 banks of 8KBytes each)",
0x04 => "128 KBytes (16 banks of 8KBytes each)",
0x05 => "64 KBytes (8 banks of 8KBytes each)",
_ => panic!("Unknown RAM Size")
}
}
| {
match byte {
0x00 => "32KByte (no ROM banking)",
0x01 => "64KByte (4 banks)",
0x02 => "128KByte (8 banks)",
0x03 => "256KByte (16 banks)",
0x04 => "512KByte (32 banks)",
0x05 => "1MByte (64 banks)",
0x06 => "2MByte (128 banks)",
0x07 => "4MByte (256 banks)",
0x52 => "1.1MByte (72 banks)",
0x53 => "1.2MByte (80 banks)",
0x54 => "1.5MByte (96 banks)",
_ => panic!("Unknown ROM Size")
}
} | identifier_body |
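// Illustrative sketch: one way the mem_map helpers above might be used together.
// The `boot_rom`/`rom`/`vram` byte slices and the 0x0147-0x0149 header offsets are
// assumptions taken from the standard Game Boy layout, not definitions from this file.
pub fn read_byte(boot_rom: &[u8], rom: &[u8], vram: &[u8], addr: u16) -> u8 {
    // map_addr() classifies a physical address into a region plus a region-relative offset.
    match map_addr(addr) {
        Addr::BootRom(off) => boot_rom[off as usize],
        Addr::CartHeader(off) | Addr::CartFixed(off) | Addr::CartSwitch(off) => rom[off as usize],
        Addr::VideoRam(off) => vram[off as usize],
        _ => 0xFF, // sound registers and high RAM are omitted in this sketch
    }
}

pub fn describe_cartridge(rom: &[u8]) -> String {
    // The cartridge-type, ROM-size and RAM-size bytes sit inside the
    // CART_HEADER_START..CART_HEADER_END range handled above.
    format!(
        "type: {}, rom: {}, ram: {}",
        cartridge_type(rom[0x0147]),
        rom_size(rom[0x0148]),
        ram_size(rom[0x0149])
    )
}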
mem_map.rs | const BOOT_ROM_START: u16 = 0x0000;
const BOOT_ROM_END: u16 = 0x00FF;
const CART_ROM_START: u16 = 0x0000;
const CART_ROM_END: u16 = 0x7FFF;
const CART_ENTRY_POINT: u16 = 0x0100;
const CART_HEADER_START: u16 = 0x0100;
const CART_HEADER_END: u16 = 0x014F;
const CART_FIXED_START: u16 = 0x0150;
const CART_FIXED_END: u16 = 0x3FFF;
const CART_SWITCH_START: u16 = 0x4000;
const CART_SWITCH_END: u16 = 0x7FFF;
const VIDEO_RAM_START: u16 = 0x8000;
const VIDEO_RAM_END: u16 = 0x9FFF;
const SOUND_REG_START: u16 = 0xFF10;
const SOUND_REG_END: u16 = 0xFF3F;
// http://gbdev.gg8.se/wiki/articles/Video_Display
const LCD_CONTROL_REGISTER: u16 = 0xFF40; // R/W
const LCD_STATUS_REGISTER: u16 = 0xFF41; // R/W
const SCROLL_Y: u16 = 0xFF42; // R/W
const SCROLL_X: u16 = 0xFF43; // R/W
const LCD_LY: u16 = 0xFF44; // R
const LCD_LYC: u16 = 0xFF45; // R/W
const LCD_WY: u16 = 0xFF4A; // R/W
const LCD_WX: u16 = 0xFF4B; // R/W
const BG_PALETTE_DATA: u16 = 0xFF47; // R/W
const OBJECT_PALETTE_0: u16 = 0xFF48; // R/W
const OBJECT_PALETTE_1: u16 = 0xFF49; // R/W
const HIGH_RAM_START: u16 = 0xFF80;
const HIGH_RAM_END: u16 = 0xFFFE;
pub fn map_addr(addr: u16) -> Addr {
match addr {
BOOT_ROM_START...BOOT_ROM_END => Addr::BootRom(addr - BOOT_ROM_START),
CART_HEADER_START...CART_HEADER_END => Addr::CartHeader(addr - CART_ROM_START),
CART_FIXED_START...CART_FIXED_END => Addr::CartFixed(addr - CART_ROM_START),
CART_SWITCH_START...CART_SWITCH_END => Addr::CartSwitch(addr - CART_ROM_START),
VIDEO_RAM_START...VIDEO_RAM_END => Addr::VideoRam(addr - VIDEO_RAM_START),
SOUND_REG_START...SOUND_REG_END => Addr::SoundRegister(addr - SOUND_REG_START),
HIGH_RAM_START...HIGH_RAM_END => Addr::HighRam(addr - HIGH_RAM_START),
_ => panic!("Unrecognised physical address: {:#x}", addr),
}
}
pub enum | {
BootRom(u16),
CartHeader(u16),
CartFixed(u16),
CartSwitch(u16),
VideoRam(u16),
SoundRegister(u16),
HighRam(u16),
}
pub fn cartridge_type(byte: u8) -> &'static str {
match byte {
0x00 => "ROM ONLY",
0x01 => "MBC1",
0x02 => "MBC1+RAM",
0x03 => "MBC1+RAM+BATTERY",
0x05 => "MBC2",
0x06 => "MBC2+BATTERY",
0x08 => "ROM+RAM",
0x09 => "ROM+RAM+BATTERY",
0x0B => "MMM01",
0x0C => "MMM01+RAM",
0x0D => "MMM01+RAM+BATTERY",
0x0F => "MBC3+TIMER+BATTERY",
0x10 => "MBC3+TIMER+RAM+BATTERY",
0x11 => "MBC3",
0x12 => "MBC3+RAM",
0x13 => "MBC3+RAM+BATTERY",
0x15 => "MBC4",
0x16 => "MBC4+RAM",
0x17 => "MBC4+RAM+BATTERY",
0x19 => "MBC5",
0x1A => "MBC5+RAM",
0x1B => "MBC5+RAM+BATTERY",
0x1C => "MBC5+RUMBLE",
0x1D => "MBC5+RUMBLE+RAM",
0x1E => "MBC5+RUMBLE+RAM+BATTERY",
0x20 => "MBC6",
0x22 => "MBC7+SENSOR+RUMBLE+RAM+BATTERY",
0xFC => "POCKET CAMERA",
0xFD => "BANDAI TAMA5",
0xFE => "HuC3",
0xFF => "HuC1+RAM+BATTERY",
_ => panic!("Unknown Cartridge Type"),
}
}
pub fn rom_size(byte: u8) -> &'static str {
match byte {
0x00 => "32KByte (no ROM banking)",
0x01 => "64KByte (4 banks)",
0x02 => "128KByte (8 banks)",
0x03 => "256KByte (16 banks)",
0x04 => "512KByte (32 banks)",
0x05 => "1MByte (64 banks)",
0x06 => "2MByte (128 banks)",
0x07 => "4MByte (256 banks)",
0x52 => "1.1MByte (72 banks)",
0x53 => "1.2MByte (80 banks)",
0x54 => "1.5MByte (96 banks)",
_ => panic!("Unknown ROM Size")
}
}
pub fn ram_size(byte: u8) -> &'static str {
match byte {
0x00 => "None",
0x01 => "2 KBytes",
0x02 => "8 Kbytes",
0x03 => "32 KBytes (4 banks of 8KBytes each)",
0x04 => "128 KBytes (16 banks of 8KBytes each)",
0x05 => "64 KBytes (8 banks of 8KBytes each)",
_ => panic!("Unknown RAM Size")
}
}
| Addr | identifier_name |
mem_map.rs | const BOOT_ROM_START: u16 = 0x0000;
const BOOT_ROM_END: u16 = 0x00FF;
const CART_ROM_START: u16 = 0x0000;
const CART_ROM_END: u16 = 0x7FFF;
const CART_ENTRY_POINT: u16 = 0x0100;
const CART_HEADER_START: u16 = 0x0100;
const CART_HEADER_END: u16 = 0x014F;
const CART_FIXED_START: u16 = 0x0150;
const CART_FIXED_END: u16 = 0x3FFF;
const CART_SWITCH_START: u16 = 0x4000;
const CART_SWITCH_END: u16 = 0x7FFF;
const VIDEO_RAM_START: u16 = 0x8000;
const VIDEO_RAM_END: u16 = 0x9FFF;
const SOUND_REG_START: u16 = 0xFF10;
const SOUND_REG_END: u16 = 0xFF3F;
// http://gbdev.gg8.se/wiki/articles/Video_Display
const LCD_CONTROL_REGISTER: u16 = 0xFF40; // R/W
const LCD_STATUS_REGISTER: u16 = 0xFF41; // R/W
const SCROLL_Y: u16 = 0xFF42; // R/W
const SCROLL_X: u16 = 0xFF43; // R/W
const LCD_LY: u16 = 0xFF44; // R
const LCD_LYC: u16 = 0xFF45; // R/W
const LCD_WY: u16 = 0xFF4A; // R/W
const LCD_WX: u16 = 0xFF4B; // R/W
const BG_PALETTE_DATA: u16 = 0xFF47; // R/W
const OBJECT_PALETTE_0: u16 = 0xFF48; // R/W
const OBJECT_PALETTE_1: u16 = 0xFF49; // R/W
const HIGH_RAM_START: u16 = 0xFF80;
const HIGH_RAM_END: u16 = 0xFFFE;
pub fn map_addr(addr: u16) -> Addr {
match addr {
BOOT_ROM_START...BOOT_ROM_END => Addr::BootRom(addr - BOOT_ROM_START),
CART_HEADER_START...CART_HEADER_END => Addr::CartHeader(addr - CART_ROM_START),
CART_FIXED_START...CART_FIXED_END => Addr::CartFixed(addr - CART_ROM_START),
CART_SWITCH_START...CART_SWITCH_END => Addr::CartSwitch(addr - CART_ROM_START),
VIDEO_RAM_START...VIDEO_RAM_END => Addr::VideoRam(addr - VIDEO_RAM_START),
SOUND_REG_START...SOUND_REG_END => Addr::SoundRegister(addr - SOUND_REG_START),
HIGH_RAM_START...HIGH_RAM_END => Addr::HighRam(addr - HIGH_RAM_START),
_ => panic!("Unrecognised physical address: {:#x}", addr),
}
}
pub enum Addr {
BootRom(u16),
CartHeader(u16),
CartFixed(u16),
CartSwitch(u16),
VideoRam(u16),
SoundRegister(u16),
HighRam(u16),
}
pub fn cartridge_type(byte: u8) -> &'static str {
match byte {
0x00 => "ROM ONLY",
0x01 => "MBC1",
0x02 => "MBC1+RAM",
0x03 => "MBC1+RAM+BATTERY",
0x05 => "MBC2",
0x06 => "MBC2+BATTERY",
0x08 => "ROM+RAM",
0x09 => "ROM+RAM+BATTERY",
0x0B => "MMM01",
0x0C => "MMM01+RAM",
0x0D => "MMM01+RAM+BATTERY",
0x0F => "MBC3+TIMER+BATTERY",
0x10 => "MBC3+TIMER+RAM+BATTERY",
0x11 => "MBC3",
0x12 => "MBC3+RAM",
0x13 => "MBC3+RAM+BATTERY",
0x15 => "MBC4",
0x16 => "MBC4+RAM",
0x17 => "MBC4+RAM+BATTERY",
0x19 => "MBC5",
0x1A => "MBC5+RAM",
0x1B => "MBC5+RAM+BATTERY",
0x1C => "MBC5+RUMBLE",
0x1D => "MBC5+RUMBLE+RAM",
0x1E => "MBC5+RUMBLE+RAM+BATTERY",
0x20 => "MBC6",
0x22 => "MBC7+SENSOR+RUMBLE+RAM+BATTERY",
0xFC => "POCKET CAMERA",
0xFD => "BANDAI TAMA5",
0xFE => "HuC3", |
pub fn rom_size(byte: u8) -> &'static str {
match byte {
0x00 => "32KByte (no ROM banking)",
0x01 => "64KByte (4 banks)",
0x02 => "128KByte (8 banks)",
0x03 => "256KByte (16 banks)",
0x04 => "512KByte (32 banks)",
0x05 => "1MByte (64 banks)",
0x06 => "2MByte (128 banks)",
0x07 => "4MByte (256 banks)",
0x52 => "1.1MByte (72 banks)",
0x53 => "1.2MByte (80 banks)",
0x54 => "1.5MByte (96 banks)",
_ => panic!("Unknown ROM Size")
}
}
pub fn ram_size(byte: u8) -> &'static str {
match byte {
0x00 => "None",
0x01 => "2 KBytes",
0x02 => "8 Kbytes",
0x03 => "32 KBytes (4 banks of 8KBytes each)",
0x04 => "128 KBytes (16 banks of 8KBytes each)",
0x05 => "64 KBytes (8 banks of 8KBytes each)",
_ => panic!("Unknown RAM Size")
}
} | 0xFF => "HuC1+RAM+BATTERY",
_ => panic!("Unknown Cartridge Type"),
}
} | random_line_split |
ent.rs | use serde::{Deserialize, Serialize};
use std::collections::HashMap;
#[derive(Serialize, Deserialize)]
pub struct PutRequest {
pub blobs: Vec<Vec<u8>>,
}
#[derive(Serialize, Deserialize)]
pub struct GetRequest {
pub items: Vec<GetItem>,
}
#[derive(Serialize, Deserialize)]
pub struct GetItem {
pub root: String,
// pub path: Vec<Selector>,
}
#[derive(Serialize, Deserialize)]
pub struct GetResponse {
pub items: HashMap<String, String>,
}
pub const API_URL_LOCALHOST: &str = "http://127.0.0.1:8088";
pub const API_URL_REMOTE: &str = "https://multiverse-312721.nw.r.appspot.com";
pub struct | {
pub api_url: String,
}
impl EntClient {
pub async fn upload_blob(&self, content: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
let req = PutRequest {
blobs: vec![content.to_vec()],
};
self.upload_blobs(&req).await?;
Ok(())
}
pub async fn upload_blobs(&self, req: &PutRequest) -> Result<(), Box<dyn std::error::Error>> {
let req_json = serde_json::to_string(&req)?;
reqwasm::http::Request::post(&format!("{}/api/v1/blobs/put", self.api_url))
.body(req_json)
.send()
.await
.map(|res| ())
.map_err(|e| e.into())
}
pub async fn get_blobs(
&self,
req: &GetRequest,
) -> Result<GetResponse, Box<dyn std::error::Error>> {
let req_json = serde_json::to_string(&req)?;
let res = reqwasm::http::Request::post(&format!("{}/api/v1/blobs/get", self.api_url))
.body(req_json)
.send()
.await?;
let res_json = res.json().await?;
Ok(res_json)
}
}
| EntClient | identifier_name |
ent.rs | use serde::{Deserialize, Serialize};
use std::collections::HashMap;
#[derive(Serialize, Deserialize)]
pub struct PutRequest {
pub blobs: Vec<Vec<u8>>,
}
#[derive(Serialize, Deserialize)]
pub struct GetRequest {
pub items: Vec<GetItem>,
}
#[derive(Serialize, Deserialize)]
pub struct GetItem {
pub root: String,
// pub path: Vec<Selector>,
}
#[derive(Serialize, Deserialize)]
pub struct GetResponse {
pub items: HashMap<String, String>,
}
pub const API_URL_LOCALHOST: &str = "http://127.0.0.1:8088";
pub const API_URL_REMOTE: &str = "https://multiverse-312721.nw.r.appspot.com";
pub struct EntClient {
pub api_url: String,
}
impl EntClient {
pub async fn upload_blob(&self, content: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
let req = PutRequest {
blobs: vec![content.to_vec()],
}; | }
pub async fn upload_blobs(&self, req: &PutRequest) -> Result<(), Box<dyn std::error::Error>> {
let req_json = serde_json::to_string(&req)?;
reqwasm::http::Request::post(&format!("{}/api/v1/blobs/put", self.api_url))
.body(req_json)
.send()
.await
.map(|res| ())
.map_err(|e| e.into())
}
pub async fn get_blobs(
&self,
req: &GetRequest,
) -> Result<GetResponse, Box<dyn std::error::Error>> {
let req_json = serde_json::to_string(&req)?;
let res = reqwasm::http::Request::post(&format!("{}/api/v1/blobs/get", self.api_url))
.body(req_json)
.send()
.await?;
let res_json = res.json().await?;
Ok(res_json)
}
} | self.upload_blobs(&req).await?;
Ok(()) | random_line_split |
borrowed-c-style-enum.rs | // compile-flags:-g
// min-lldb-version: 310
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *the_a_ref
// gdbg-check:$1 = TheA
// gdbr-check:$1 = borrowed_c_style_enum::ABC::TheA
// gdb-command:print *the_b_ref
// gdbg-check:$2 = TheB
// gdbr-check:$2 = borrowed_c_style_enum::ABC::TheB
// gdb-command:print *the_c_ref
// gdbg-check:$3 = TheC
// gdbr-check:$3 = borrowed_c_style_enum::ABC::TheC
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print *the_a_ref
// lldbg-check:[...]$0 = TheA
// lldbr-check:(borrowed_c_style_enum::ABC) *the_a_ref = borrowed_c_style_enum::ABC::TheA
// lldb-command:print *the_b_ref
// lldbg-check:[...]$1 = TheB
// lldbr-check:(borrowed_c_style_enum::ABC) *the_b_ref = borrowed_c_style_enum::ABC::TheB
// lldb-command:print *the_c_ref
// lldbg-check:[...]$2 = TheC
// lldbr-check:(borrowed_c_style_enum::ABC) *the_c_ref = borrowed_c_style_enum::ABC::TheC
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
enum ABC { TheA, TheB, TheC }
fn | () {
let the_a = ABC::TheA;
let the_a_ref: &ABC = &the_a;
let the_b = ABC::TheB;
let the_b_ref: &ABC = &the_b;
let the_c = ABC::TheC;
let the_c_ref: &ABC = &the_c;
zzz(); // #break
}
fn zzz() {()}
| main | identifier_name |
borrowed-c-style-enum.rs | // compile-flags:-g
// min-lldb-version: 310
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *the_a_ref
// gdbg-check:$1 = TheA
// gdbr-check:$1 = borrowed_c_style_enum::ABC::TheA
// gdb-command:print *the_b_ref
// gdbg-check:$2 = TheB
// gdbr-check:$2 = borrowed_c_style_enum::ABC::TheB
// gdb-command:print *the_c_ref
// gdbg-check:$3 = TheC
// gdbr-check:$3 = borrowed_c_style_enum::ABC::TheC
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print *the_a_ref
// lldbg-check:[...]$0 = TheA
// lldbr-check:(borrowed_c_style_enum::ABC) *the_a_ref = borrowed_c_style_enum::ABC::TheA
// lldb-command:print *the_b_ref
// lldbg-check:[...]$1 = TheB
// lldbr-check:(borrowed_c_style_enum::ABC) *the_b_ref = borrowed_c_style_enum::ABC::TheB
// lldb-command:print *the_c_ref
// lldbg-check:[...]$2 = TheC
// lldbr-check:(borrowed_c_style_enum::ABC) *the_c_ref = borrowed_c_style_enum::ABC::TheC
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
enum ABC { TheA, TheB, TheC }
fn main() {
let the_a = ABC::TheA;
let the_a_ref: &ABC = &the_a;
let the_b = ABC::TheB;
let the_b_ref: &ABC = &the_b;
let the_c = ABC::TheC;
let the_c_ref: &ABC = &the_c;
zzz(); // #break
}
fn zzz() | {()} | identifier_body |
|
borrowed-c-style-enum.rs | // compile-flags:-g
// min-lldb-version: 310
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *the_a_ref | // gdbr-check:$2 = borrowed_c_style_enum::ABC::TheB
// gdb-command:print *the_c_ref
// gdbg-check:$3 = TheC
// gdbr-check:$3 = borrowed_c_style_enum::ABC::TheC
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print *the_a_ref
// lldbg-check:[...]$0 = TheA
// lldbr-check:(borrowed_c_style_enum::ABC) *the_a_ref = borrowed_c_style_enum::ABC::TheA
// lldb-command:print *the_b_ref
// lldbg-check:[...]$1 = TheB
// lldbr-check:(borrowed_c_style_enum::ABC) *the_b_ref = borrowed_c_style_enum::ABC::TheB
// lldb-command:print *the_c_ref
// lldbg-check:[...]$2 = TheC
// lldbr-check:(borrowed_c_style_enum::ABC) *the_c_ref = borrowed_c_style_enum::ABC::TheC
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
enum ABC { TheA, TheB, TheC }
fn main() {
let the_a = ABC::TheA;
let the_a_ref: &ABC = &the_a;
let the_b = ABC::TheB;
let the_b_ref: &ABC = &the_b;
let the_c = ABC::TheC;
let the_c_ref: &ABC = &the_c;
zzz(); // #break
}
fn zzz() {()} | // gdbg-check:$1 = TheA
// gdbr-check:$1 = borrowed_c_style_enum::ABC::TheA
// gdb-command:print *the_b_ref
// gdbg-check:$2 = TheB | random_line_split |
ext.rs | use std::io;
use std::mem;
use std::net::SocketAddr;
use std::os::unix::io::RawFd;
use libc;
use sys::unix::err::cvt;
#[inline]
#[allow(dead_code)]
pub fn pipe() -> io::Result<(RawFd, RawFd)> {
let mut fds = [0 as libc::c_int; 2];
cvt(unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) })?;
Ok((fds[0], fds[1]))
}
#[inline]
pub fn socket_v4() -> io::Result<RawFd> |
#[inline]
pub fn socket_v6() -> io::Result<RawFd> {
let res = unsafe {
libc::socket(
libc::AF_INET6,
libc::SOCK_STREAM | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
0,
)
};
cvt(res)
}
#[inline]
pub fn accept(listener_fd: RawFd) -> io::Result<(RawFd, SocketAddr)> {
let mut storage: libc::sockaddr_storage = unsafe { mem::uninitialized() };
let mut len = mem::size_of::<libc::sockaddr_storage>() as libc::socklen_t;
let res = unsafe {
libc::accept4(
listener_fd,
&mut storage as *mut _ as *mut _,
&mut len,
libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
)
};
let sock = cvt(res)?;
let addr = super::sockaddr_to_addr(&storage, len as usize)?;
Ok((sock, addr))
}
| {
let res = unsafe {
libc::socket(
libc::AF_INET,
libc::SOCK_STREAM | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
0,
)
};
cvt(res)
} | identifier_body |
ext.rs | use std::io;
use std::mem;
use std::net::SocketAddr;
use std::os::unix::io::RawFd;
use libc;
use sys::unix::err::cvt;
#[inline]
#[allow(dead_code)]
pub fn pipe() -> io::Result<(RawFd, RawFd)> {
let mut fds = [0 as libc::c_int; 2];
cvt(unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) })?;
Ok((fds[0], fds[1]))
}
#[inline]
pub fn | () -> io::Result<RawFd> {
let res = unsafe {
libc::socket(
libc::AF_INET,
libc::SOCK_STREAM | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
0,
)
};
cvt(res)
}
#[inline]
pub fn socket_v6() -> io::Result<RawFd> {
let res = unsafe {
libc::socket(
libc::AF_INET6,
libc::SOCK_STREAM | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
0,
)
};
cvt(res)
}
#[inline]
pub fn accept(listener_fd: RawFd) -> io::Result<(RawFd, SocketAddr)> {
let mut storage: libc::sockaddr_storage = unsafe { mem::uninitialized() };
let mut len = mem::size_of::<libc::sockaddr_storage>() as libc::socklen_t;
let res = unsafe {
libc::accept4(
listener_fd,
&mut storage as *mut _ as *mut _,
&mut len,
libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
)
};
let sock = cvt(res)?;
let addr = super::sockaddr_to_addr(&storage, len as usize)?;
Ok((sock, addr))
}
| socket_v4 | identifier_name |
ext.rs | use std::net::SocketAddr;
use std::os::unix::io::RawFd;
use libc;
use sys::unix::err::cvt;
#[inline]
#[allow(dead_code)]
pub fn pipe() -> io::Result<(RawFd, RawFd)> {
let mut fds = [0 as libc::c_int; 2];
cvt(unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) })?;
Ok((fds[0], fds[1]))
}
#[inline]
pub fn socket_v4() -> io::Result<RawFd> {
let res = unsafe {
libc::socket(
libc::AF_INET,
libc::SOCK_STREAM | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
0,
)
};
cvt(res)
}
#[inline]
pub fn socket_v6() -> io::Result<RawFd> {
let res = unsafe {
libc::socket(
libc::AF_INET6,
libc::SOCK_STREAM | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
0,
)
};
cvt(res)
}
#[inline]
pub fn accept(listener_fd: RawFd) -> io::Result<(RawFd, SocketAddr)> {
let mut storage: libc::sockaddr_storage = unsafe { mem::uninitialized() };
let mut len = mem::size_of::<libc::sockaddr_storage>() as libc::socklen_t;
let res = unsafe {
libc::accept4(
listener_fd,
&mut storage as *mut _ as *mut _,
&mut len,
libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC,
)
};
let sock = cvt(res)?;
let addr = super::sockaddr_to_addr(&storage, len as usize)?;
Ok((sock, addr))
} | use std::io;
use std::mem; | random_line_split |
|
normalize_projection_ty.rs | use rustc_infer::infer::canonical::{Canonical, QueryResponse};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::TraitEngineExt as _;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
use rustc_trait_selection::infer::InferCtxtBuilderExt;
use rustc_trait_selection::traits::query::{
normalize::NormalizationResult, CanonicalProjectionGoal, NoSolution,
};
use rustc_trait_selection::traits::{self, ObligationCause, SelectionContext};
use std::sync::atomic::Ordering;
crate fn provide(p: &mut Providers) |
fn normalize_projection_ty<'tcx>(
tcx: TyCtxt<'tcx>,
goal: CanonicalProjectionGoal<'tcx>,
) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, NormalizationResult<'tcx>>>, NoSolution> {
debug!("normalize_provider(goal={:#?})", goal);
tcx.sess.perf_stats.normalize_projection_ty.fetch_add(1, Ordering::Relaxed);
tcx.infer_ctxt().enter_canonical_trait_query(
&goal,
|infcx, fulfill_cx, ParamEnvAnd { param_env, value: goal }| {
let selcx = &mut SelectionContext::new(infcx);
let cause = ObligationCause::dummy();
let mut obligations = vec![];
let answer = traits::normalize_projection_type(
selcx,
param_env,
goal,
cause,
0,
&mut obligations,
);
fulfill_cx.register_predicate_obligations(infcx, obligations);
Ok(NormalizationResult { normalized_ty: answer })
},
)
}
| {
*p = Providers { normalize_projection_ty, ..*p };
} | identifier_body |
normalize_projection_ty.rs | use rustc_infer::infer::canonical::{Canonical, QueryResponse};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::TraitEngineExt as _;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
use rustc_trait_selection::infer::InferCtxtBuilderExt;
use rustc_trait_selection::traits::query::{
normalize::NormalizationResult, CanonicalProjectionGoal, NoSolution,
};
use rustc_trait_selection::traits::{self, ObligationCause, SelectionContext};
use std::sync::atomic::Ordering;
crate fn provide(p: &mut Providers) {
*p = Providers { normalize_projection_ty,..*p };
}
fn | <'tcx>(
tcx: TyCtxt<'tcx>,
goal: CanonicalProjectionGoal<'tcx>,
) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, NormalizationResult<'tcx>>>, NoSolution> {
debug!("normalize_provider(goal={:#?})", goal);
tcx.sess.perf_stats.normalize_projection_ty.fetch_add(1, Ordering::Relaxed);
tcx.infer_ctxt().enter_canonical_trait_query(
&goal,
|infcx, fulfill_cx, ParamEnvAnd { param_env, value: goal }| {
let selcx = &mut SelectionContext::new(infcx);
let cause = ObligationCause::dummy();
let mut obligations = vec![];
let answer = traits::normalize_projection_type(
selcx,
param_env,
goal,
cause,
0,
&mut obligations,
);
fulfill_cx.register_predicate_obligations(infcx, obligations);
Ok(NormalizationResult { normalized_ty: answer })
},
)
}
| normalize_projection_ty | identifier_name |
normalize_projection_ty.rs | use rustc_infer::infer::canonical::{Canonical, QueryResponse};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::TraitEngineExt as _;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
use rustc_trait_selection::infer::InferCtxtBuilderExt;
use rustc_trait_selection::traits::query::{
normalize::NormalizationResult, CanonicalProjectionGoal, NoSolution,
};
use rustc_trait_selection::traits::{self, ObligationCause, SelectionContext};
use std::sync::atomic::Ordering;
crate fn provide(p: &mut Providers) {
*p = Providers { normalize_projection_ty,..*p };
}
fn normalize_projection_ty<'tcx>(
tcx: TyCtxt<'tcx>,
goal: CanonicalProjectionGoal<'tcx>,
) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, NormalizationResult<'tcx>>>, NoSolution> {
debug!("normalize_provider(goal={:#?})", goal);
tcx.sess.perf_stats.normalize_projection_ty.fetch_add(1, Ordering::Relaxed);
tcx.infer_ctxt().enter_canonical_trait_query(
&goal,
|infcx, fulfill_cx, ParamEnvAnd { param_env, value: goal }| {
let selcx = &mut SelectionContext::new(infcx);
let cause = ObligationCause::dummy();
let mut obligations = vec![];
let answer = traits::normalize_projection_type(
selcx,
param_env, | );
fulfill_cx.register_predicate_obligations(infcx, obligations);
Ok(NormalizationResult { normalized_ty: answer })
},
)
} | goal,
cause,
0,
&mut obligations, | random_line_split |
x86_64_linux_android.rs | // https://developer.android.com/ndk/guides/abis.html#86-64
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m64".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
llvm_target: "x86_64-linux-android".to_string(),
pointer_width: 64,
data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.to_string(),
arch: "x86_64".to_string(),
options: base,
}
} | use crate::spec::{LinkerFlavor, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::android_base::opts();
base.cpu = "x86-64".to_string(); | random_line_split |
|
x86_64_linux_android.rs | use crate::spec::{LinkerFlavor, StackProbeType, Target};
pub fn | () -> Target {
let mut base = super::android_base::opts();
base.cpu = "x86-64".to_string();
// https://developer.android.com/ndk/guides/abis.html#86-64
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m64".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
llvm_target: "x86_64-linux-android".to_string(),
pointer_width: 64,
data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.to_string(),
arch: "x86_64".to_string(),
options: base,
}
}
| target | identifier_name |
x86_64_linux_android.rs | use crate::spec::{LinkerFlavor, StackProbeType, Target};
pub fn target() -> Target | {
let mut base = super::android_base::opts();
base.cpu = "x86-64".to_string();
// https://developer.android.com/ndk/guides/abis.html#86-64
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m64".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
llvm_target: "x86_64-linux-android".to_string(),
pointer_width: 64,
data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.to_string(),
arch: "x86_64".to_string(),
options: base,
}
} | identifier_body |
|
sync.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethsync::PeerInfo as SyncPeerInfo;
use serde::{Serialize, Serializer};
use v1::types::U256;
/// Sync info
#[derive(Default, Debug, Serialize, PartialEq)]
pub struct SyncInfo {
/// Starting block
#[serde(rename="startingBlock")]
pub starting_block: U256,
/// Current block
#[serde(rename="currentBlock")]
pub current_block: U256,
/// Highest block seen so far
#[serde(rename="highestBlock")]
pub highest_block: U256,
}
/// Peers info
#[derive(Default, Debug, Serialize)]
pub struct Peers {
/// Number of active peers
pub active: usize,
/// Number of connected peers
pub connected: usize,
/// Max number of peers
pub max: u32,
/// Detailed information on peers
pub peers: Vec<PeerInfo>,
}
/// Peer connection information
#[derive(Default, Debug, Serialize)]
pub struct PeerInfo {
/// Public node id
pub id: Option<String>, | /// Network information
pub network: PeerNetworkInfo,
/// Protocols information
pub protocols: PeerProtocolsInfo,
}
/// Peer network information
#[derive(Default, Debug, Serialize)]
pub struct PeerNetworkInfo {
/// Remote endpoint address
#[serde(rename="remoteAddress")]
pub remote_address: String,
/// Local endpoint address
#[serde(rename="localAddress")]
pub local_address: String,
}
/// Peer protocols information
#[derive(Default, Debug, Serialize)]
pub struct PeerProtocolsInfo {
/// Ethereum protocol information
pub eth: Option<PeerEthereumProtocolInfo>,
}
/// Peer Ethereum protocol information
#[derive(Default, Debug, Serialize)]
pub struct PeerEthereumProtocolInfo {
/// Negotiated ethereum protocol version
pub version: u32,
/// Peer total difficulty if known
pub difficulty: Option<U256>,
/// SHA3 of peer best block hash
pub head: String,
}
/// Sync status
#[derive(Debug, PartialEq)]
pub enum SyncStatus {
/// Info when syncing
Info(SyncInfo),
/// Not syncing
None
}
impl Serialize for SyncStatus {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: Serializer {
match *self {
SyncStatus::Info(ref info) => info.serialize(serializer),
SyncStatus::None => false.serialize(serializer)
}
}
}
impl From<SyncPeerInfo> for PeerInfo {
fn from(p: SyncPeerInfo) -> PeerInfo {
PeerInfo {
id: p.id,
name: p.client_version,
caps: p.capabilities,
network: PeerNetworkInfo {
remote_address: p.remote_address,
local_address: p.local_address,
},
protocols: PeerProtocolsInfo {
eth: Some(PeerEthereumProtocolInfo {
version: p.eth_version,
difficulty: p.eth_difficulty.map(|d| d.into()),
head: p.eth_head.hex(),
})
},
}
}
}
#[cfg(test)]
mod tests {
use serde_json;
use super::{SyncInfo, SyncStatus, Peers};
#[test]
fn test_serialize_sync_info() {
let t = SyncInfo::default();
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0"}"#);
}
#[test]
fn test_serialize_peers() {
let t = Peers::default();
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"active":0,"connected":0,"max":0,"peers":[]}"#);
}
#[test]
fn test_serialize_sync_status() {
let t = SyncStatus::None;
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, "false");
let t = SyncStatus::Info(SyncInfo::default());
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0"}"#);
}
} | /// Node client ID
pub name: String,
/// Capabilities
pub caps: Vec<String>, | random_line_split |
sync.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethsync::PeerInfo as SyncPeerInfo;
use serde::{Serialize, Serializer};
use v1::types::U256;
/// Sync info
#[derive(Default, Debug, Serialize, PartialEq)]
pub struct SyncInfo {
/// Starting block
#[serde(rename="startingBlock")]
pub starting_block: U256,
/// Current block
#[serde(rename="currentBlock")]
pub current_block: U256,
/// Highest block seen so far
#[serde(rename="highestBlock")]
pub highest_block: U256,
}
/// Peers info
#[derive(Default, Debug, Serialize)]
pub struct Peers {
/// Number of active peers
pub active: usize,
/// Number of connected peers
pub connected: usize,
/// Max number of peers
pub max: u32,
/// Detailed information on peers
pub peers: Vec<PeerInfo>,
}
/// Peer connection information
#[derive(Default, Debug, Serialize)]
pub struct PeerInfo {
/// Public node id
pub id: Option<String>,
/// Node client ID
pub name: String,
/// Capabilities
pub caps: Vec<String>,
/// Network information
pub network: PeerNetworkInfo,
/// Protocols information
pub protocols: PeerProtocolsInfo,
}
/// Peer network information
#[derive(Default, Debug, Serialize)]
pub struct PeerNetworkInfo {
/// Remote endpoint address
#[serde(rename="remoteAddress")]
pub remote_address: String,
/// Local endpoint address
#[serde(rename="localAddress")]
pub local_address: String,
}
/// Peer protocols information
#[derive(Default, Debug, Serialize)]
pub struct PeerProtocolsInfo {
/// Ethereum protocol information
pub eth: Option<PeerEthereumProtocolInfo>,
}
/// Peer Ethereum protocol information
#[derive(Default, Debug, Serialize)]
pub struct PeerEthereumProtocolInfo {
/// Negotiated ethereum protocol version
pub version: u32,
/// Peer total difficulty if known
pub difficulty: Option<U256>,
/// SHA3 of peer best block hash
pub head: String,
}
/// Sync status
#[derive(Debug, PartialEq)]
pub enum SyncStatus {
/// Info when syncing
Info(SyncInfo),
/// Not syncing
None
}
impl Serialize for SyncStatus {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: Serializer {
match *self {
SyncStatus::Info(ref info) => info.serialize(serializer),
SyncStatus::None => false.serialize(serializer)
}
}
}
impl From<SyncPeerInfo> for PeerInfo {
fn from(p: SyncPeerInfo) -> PeerInfo {
PeerInfo {
id: p.id,
name: p.client_version,
caps: p.capabilities,
network: PeerNetworkInfo {
remote_address: p.remote_address,
local_address: p.local_address,
},
protocols: PeerProtocolsInfo {
eth: Some(PeerEthereumProtocolInfo {
version: p.eth_version,
difficulty: p.eth_difficulty.map(|d| d.into()),
head: p.eth_head.hex(),
})
},
}
}
}
#[cfg(test)]
mod tests {
use serde_json;
use super::{SyncInfo, SyncStatus, Peers};
#[test]
fn test_serialize_sync_info() |
#[test]
fn test_serialize_peers() {
let t = Peers::default();
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"active":0,"connected":0,"max":0,"peers":[]}"#);
}
#[test]
fn test_serialize_sync_status() {
let t = SyncStatus::None;
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, "false");
let t = SyncStatus::Info(SyncInfo::default());
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0"}"#);
}
}
| {
let t = SyncInfo::default();
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0"}"#);
} | identifier_body |
sync.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethsync::PeerInfo as SyncPeerInfo;
use serde::{Serialize, Serializer};
use v1::types::U256;
/// Sync info
#[derive(Default, Debug, Serialize, PartialEq)]
pub struct SyncInfo {
/// Starting block
#[serde(rename="startingBlock")]
pub starting_block: U256,
/// Current block
#[serde(rename="currentBlock")]
pub current_block: U256,
/// Highest block seen so far
#[serde(rename="highestBlock")]
pub highest_block: U256,
}
/// Peers info
#[derive(Default, Debug, Serialize)]
pub struct Peers {
/// Number of active peers
pub active: usize,
/// Number of connected peers
pub connected: usize,
/// Max number of peers
pub max: u32,
/// Detailed information on peers
pub peers: Vec<PeerInfo>,
}
/// Peer connection information
#[derive(Default, Debug, Serialize)]
pub struct | {
/// Public node id
pub id: Option<String>,
/// Node client ID
pub name: String,
/// Capabilities
pub caps: Vec<String>,
/// Network information
pub network: PeerNetworkInfo,
/// Protocols information
pub protocols: PeerProtocolsInfo,
}
/// Peer network information
#[derive(Default, Debug, Serialize)]
pub struct PeerNetworkInfo {
/// Remote endpoint address
#[serde(rename="remoteAddress")]
pub remote_address: String,
/// Local endpoint address
#[serde(rename="localAddress")]
pub local_address: String,
}
/// Peer protocols information
#[derive(Default, Debug, Serialize)]
pub struct PeerProtocolsInfo {
/// Ethereum protocol information
pub eth: Option<PeerEthereumProtocolInfo>,
}
/// Peer Ethereum protocol information
#[derive(Default, Debug, Serialize)]
pub struct PeerEthereumProtocolInfo {
/// Negotiated ethereum protocol version
pub version: u32,
/// Peer total difficulty if known
pub difficulty: Option<U256>,
/// SHA3 of peer best block hash
pub head: String,
}
/// Sync status
#[derive(Debug, PartialEq)]
pub enum SyncStatus {
/// Info when syncing
Info(SyncInfo),
/// Not syncing
None
}
impl Serialize for SyncStatus {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: Serializer {
match *self {
SyncStatus::Info(ref info) => info.serialize(serializer),
SyncStatus::None => false.serialize(serializer)
}
}
}
impl From<SyncPeerInfo> for PeerInfo {
fn from(p: SyncPeerInfo) -> PeerInfo {
PeerInfo {
id: p.id,
name: p.client_version,
caps: p.capabilities,
network: PeerNetworkInfo {
remote_address: p.remote_address,
local_address: p.local_address,
},
protocols: PeerProtocolsInfo {
eth: Some(PeerEthereumProtocolInfo {
version: p.eth_version,
difficulty: p.eth_difficulty.map(|d| d.into()),
head: p.eth_head.hex(),
})
},
}
}
}
#[cfg(test)]
mod tests {
use serde_json;
use super::{SyncInfo, SyncStatus, Peers};
#[test]
fn test_serialize_sync_info() {
let t = SyncInfo::default();
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0"}"#);
}
#[test]
fn test_serialize_peers() {
let t = Peers::default();
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"active":0,"connected":0,"max":0,"peers":[]}"#);
}
#[test]
fn test_serialize_sync_status() {
let t = SyncStatus::None;
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, "false");
let t = SyncStatus::Info(SyncInfo::default());
let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"startingBlock":"0x0","currentBlock":"0x0","highestBlock":"0x0"}"#);
}
}
| PeerInfo | identifier_name |
error.rs | extern crate backtrace;
extern crate libc;
use std::fmt;
use std::ops::Deref;
use std::io;
use std::string::FromUtf8Error;
use self::backtrace::Backtrace;
use self::backtrace::BacktraceFrame;
#[derive(Debug)]
pub struct RError<E> {
e: E,
bt: Option<Backtrace>,
}
pub fn is_enoent(e: &io::Error) -> bool {
return e.kind() == io::ErrorKind::NotFound;
}
pub fn try_enoent(e: io::Error) -> Result<bool> {
if is_enoent(&e) {
return Ok(true);
} else {
return Err(RError::from(e));
}
}
pub fn propagate<T>(e: io::Error) -> Result<T> {
return Err(RError::propagate(e));
}
pub fn errno(e: &RError<io::Error>) -> libc::c_int {
if RError::expected(e) {
return e.e.raw_os_error().unwrap();
} else {
return libc::EIO;
}
}
impl<E> RError<E> {
pub fn propagate(e: E) -> RError<E> |
pub fn from(e: E) -> RError<E> {
let mut bt = Backtrace::new();
let mut i: usize = 0;
let mut chop: usize = 0;
for f in bt.frames() {
if let Some(sym) = f.symbols().first() {
if let Some(p) = sym.filename() {
if p.file_name().unwrap() == "error.rs" {
chop = i;
break;
}
}
}
i += 1;
}
if chop != 0 {
let mut frames: Vec<BacktraceFrame> = bt.into();
let _: Vec<_> = frames.drain(0..i).collect();
bt = Backtrace::from(frames);
}
RError { e: e, bt: Some(bt) }
}
fn expected(&self) -> bool {
return self.bt.is_none();
}
}
impl RError<io::Error> {
pub fn errno(&self) -> i32 {
return self.e.raw_os_error().unwrap();
}
}
impl fmt::Display for RError<io::Error> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.bt {
Some(ref bt) => write!(f, "{} {:?}", self.e, bt),
None => write!(f, "{}", self.e),
}
}
}
impl<E> Deref for RError<E> {
type Target = E;
fn deref(&self) -> &E {
&self.e
}
}
// XXX not really a clone
impl Clone for RError<io::Error> {
fn clone(&self) -> Self {
RError {
e: io::Error::from_raw_os_error(self.e.raw_os_error().unwrap()),
bt: Default::default(),
}
}
}
impl From<io::Error> for RError<io::Error> {
fn from(e: io::Error) -> RError<io::Error> {
RError::from(e)
}
}
impl From<FromUtf8Error> for RError<FromUtf8Error> {
fn from(e: FromUtf8Error) -> RError<FromUtf8Error> {
RError::from(e)
}
}
pub type Result<T> = ::std::result::Result<T, RError<io::Error>>;
| {
RError {
e: e,
bt: Default::default(),
}
} | identifier_body |
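// Illustrative sketch: how a caller that must hand a raw errno back to C might use
// this module. The `lookup`/`lookup_errno` functions, the "/missing" path and the
// ENOENT case are assumptions made up for the example, not items from error.rs.
fn lookup(path: &str) -> Result<u64> {
    if path == "/missing" {
        // An expected failure: propagate() skips backtrace capture, so errno()
        // reports the original OS error instead of collapsing it to EIO.
        return propagate(io::Error::from_raw_os_error(libc::ENOENT));
    }
    Ok(42)
}

fn lookup_errno(path: &str) -> libc::c_int {
    match lookup(path) {
        Ok(_) => 0,
        // errno() returns the wrapped raw OS error for expected failures and
        // falls back to libc::EIO for unexpected ones built via RError::from().
        Err(ref e) => errno(e),
    }
}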
error.rs | extern crate backtrace;
extern crate libc;
use std::fmt;
use std::ops::Deref;
use std::io;
use std::string::FromUtf8Error;
use self::backtrace::Backtrace;
use self::backtrace::BacktraceFrame;
#[derive(Debug)]
pub struct RError<E> {
e: E,
bt: Option<Backtrace>,
}
pub fn is_enoent(e: &io::Error) -> bool {
return e.kind() == io::ErrorKind::NotFound;
}
pub fn try_enoent(e: io::Error) -> Result<bool> {
if is_enoent(&e) {
return Ok(true);
} else {
return Err(RError::from(e));
}
}
pub fn propagate<T>(e: io::Error) -> Result<T> {
return Err(RError::propagate(e));
}
pub fn errno(e: &RError<io::Error>) -> libc::c_int {
if RError::expected(e) | else {
return libc::EIO;
}
}
impl<E> RError<E> {
pub fn propagate(e: E) -> RError<E> {
RError {
e: e,
bt: Default::default(),
}
}
pub fn from(e: E) -> RError<E> {
let mut bt = Backtrace::new();
let mut i: usize = 0;
let mut chop: usize = 0;
for f in bt.frames() {
if let Some(sym) = f.symbols().first() {
if let Some(p) = sym.filename() {
if p.file_name().unwrap() == "error.rs" {
chop = i;
break;
}
}
}
i += 1;
}
if chop != 0 {
let mut frames: Vec<BacktraceFrame> = bt.into();
let _: Vec<_> = frames.drain(0..i).collect();
bt = Backtrace::from(frames);
}
RError { e: e, bt: Some(bt) }
}
fn expected(&self) -> bool {
return self.bt.is_none();
}
}
impl RError<io::Error> {
pub fn errno(&self) -> i32 {
return self.e.raw_os_error().unwrap();
}
}
impl fmt::Display for RError<io::Error> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.bt {
Some(ref bt) => write!(f, "{} {:?}", self.e, bt),
None => write!(f, "{}", self.e),
}
}
}
impl<E> Deref for RError<E> {
type Target = E;
fn deref(&self) -> &E {
&self.e
}
}
// XXX not really a clone
impl Clone for RError<io::Error> {
fn clone(&self) -> Self {
RError {
e: io::Error::from_raw_os_error(self.e.raw_os_error().unwrap()),
bt: Default::default(),
}
}
}
impl From<io::Error> for RError<io::Error> {
fn from(e: io::Error) -> RError<io::Error> {
RError::from(e)
}
}
impl From<FromUtf8Error> for RError<FromUtf8Error> {
fn from(e: FromUtf8Error) -> RError<FromUtf8Error> {
RError::from(e)
}
}
pub type Result<T> = ::std::result::Result<T, RError<io::Error>>;
| {
return e.e.raw_os_error().unwrap();
} | conditional_block |
error.rs | extern crate backtrace;
extern crate libc;
use std::fmt;
use std::ops::Deref;
use std::io;
use std::string::FromUtf8Error;
use self::backtrace::Backtrace;
use self::backtrace::BacktraceFrame;
#[derive(Debug)]
pub struct RError<E> {
e: E,
bt: Option<Backtrace>,
}
pub fn is_enoent(e: &io::Error) -> bool {
return e.kind() == io::ErrorKind::NotFound;
}
pub fn try_enoent(e: io::Error) -> Result<bool> {
if is_enoent(&e) {
return Ok(true);
} else {
return Err(RError::from(e));
}
}
pub fn | <T>(e: io::Error) -> Result<T> {
return Err(RError::propagate(e));
}
pub fn errno(e: &RError<io::Error>) -> libc::c_int {
if RError::expected(e) {
return e.e.raw_os_error().unwrap();
} else {
return libc::EIO;
}
}
impl<E> RError<E> {
pub fn propagate(e: E) -> RError<E> {
RError {
e: e,
bt: Default::default(),
}
}
pub fn from(e: E) -> RError<E> {
let mut bt = Backtrace::new();
let mut i: usize = 0;
let mut chop: usize = 0;
for f in bt.frames() {
if let Some(sym) = f.symbols().first() {
if let Some(p) = sym.filename() {
if p.file_name().unwrap() == "error.rs" {
chop = i;
break;
}
}
}
i += 1;
}
if chop != 0 {
let mut frames: Vec<BacktraceFrame> = bt.into();
let _: Vec<_> = frames.drain(0..i).collect();
bt = Backtrace::from(frames);
}
RError { e: e, bt: Some(bt) }
}
fn expected(&self) -> bool {
return self.bt.is_none();
}
}
impl RError<io::Error> {
pub fn errno(&self) -> i32 {
return self.e.raw_os_error().unwrap();
}
}
impl fmt::Display for RError<io::Error> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.bt {
Some(ref bt) => write!(f, "{} {:?}", self.e, bt),
None => write!(f, "{}", self.e),
}
}
}
impl<E> Deref for RError<E> {
type Target = E;
fn deref(&self) -> &E {
&self.e
}
}
// XXX not really a clone
impl Clone for RError<io::Error> {
fn clone(&self) -> Self {
RError {
e: io::Error::from_raw_os_error(self.e.raw_os_error().unwrap()),
bt: Default::default(),
}
}
}
impl From<io::Error> for RError<io::Error> {
fn from(e: io::Error) -> RError<io::Error> {
RError::from(e)
}
}
impl From<FromUtf8Error> for RError<FromUtf8Error> {
fn from(e: FromUtf8Error) -> RError<FromUtf8Error> {
RError::from(e)
}
}
pub type Result<T> = ::std::result::Result<T, RError<io::Error>>;
| propagate | identifier_name |
error.rs | extern crate backtrace;
extern crate libc;
use std::fmt;
use std::ops::Deref;
use std::io;
use std::string::FromUtf8Error;
use self::backtrace::Backtrace;
use self::backtrace::BacktraceFrame; | pub struct RError<E> {
e: E,
bt: Option<Backtrace>,
}
pub fn is_enoent(e: &io::Error) -> bool {
return e.kind() == io::ErrorKind::NotFound;
}
pub fn try_enoent(e: io::Error) -> Result<bool> {
if is_enoent(&e) {
return Ok(true);
} else {
return Err(RError::from(e));
}
}
pub fn propagate<T>(e: io::Error) -> Result<T> {
return Err(RError::propagate(e));
}
pub fn errno(e: &RError<io::Error>) -> libc::c_int {
if RError::expected(e) {
return e.e.raw_os_error().unwrap();
} else {
return libc::EIO;
}
}
impl<E> RError<E> {
pub fn propagate(e: E) -> RError<E> {
RError {
e: e,
bt: Default::default(),
}
}
pub fn from(e: E) -> RError<E> {
let mut bt = Backtrace::new();
let mut i: usize = 0;
let mut chop: usize = 0;
for f in bt.frames() {
if let Some(sym) = f.symbols().first() {
if let Some(p) = sym.filename() {
if p.file_name().unwrap() == "error.rs" {
chop = i;
break;
}
}
}
i += 1;
}
if chop != 0 {
let mut frames: Vec<BacktraceFrame> = bt.into();
let _: Vec<_> = frames.drain(0..i).collect();
bt = Backtrace::from(frames);
}
RError { e: e, bt: Some(bt) }
}
fn expected(&self) -> bool {
return self.bt.is_none();
}
}
impl RError<io::Error> {
pub fn errno(&self) -> i32 {
return self.e.raw_os_error().unwrap();
}
}
impl fmt::Display for RError<io::Error> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.bt {
Some(ref bt) => write!(f, "{} {:?}", self.e, bt),
None => write!(f, "{}", self.e),
}
}
}
impl<E> Deref for RError<E> {
type Target = E;
fn deref(&self) -> &E {
&self.e
}
}
// XXX not really a clone
impl Clone for RError<io::Error> {
fn clone(&self) -> Self {
RError {
e: io::Error::from_raw_os_error(self.e.raw_os_error().unwrap()),
bt: Default::default(),
}
}
}
impl From<io::Error> for RError<io::Error> {
fn from(e: io::Error) -> RError<io::Error> {
RError::from(e)
}
}
impl From<FromUtf8Error> for RError<FromUtf8Error> {
fn from(e: FromUtf8Error) -> RError<FromUtf8Error> {
RError::from(e)
}
}
pub type Result<T> = ::std::result::Result<T, RError<io::Error>>; |
#[derive(Debug)] | random_line_split |
main.rs | use std::net::SocketAddr;
use std::sync::Arc;
use hyper::server::Server;
use hyper::service::{make_service_fn, service_fn};
use log::{error, info};
use crate::runtime;
use crate::server::github_handler::GithubHandlerState;
use crate::server::octobot_service::OctobotService;
use crate::server::sessions::Sessions;
use octobot_lib::config::Config;
use octobot_lib::github;
use octobot_lib::jira;
use octobot_lib::jira::api::JiraSession;
use octobot_lib::metrics;
pub fn start(config: Config) {
let num_http_threads = config.main.num_http_threads.unwrap_or(20);
let metrics = metrics::Metrics::new();
runtime::run(num_http_threads, metrics.clone(), async move {
run_server(config, metrics).await
});
}
async fn run_server(config: Config, metrics: Arc<metrics::Metrics>) {
let config = Arc::new(config);
let github: Arc<dyn github::api::GithubSessionFactory>;
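// Prefer GitHub App credentials when an app_id is configured;
// otherwise fall back to a session backed by the configured api_token.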
if config.github.app_id.is_some() {
github = match github::api::GithubApp::new(
&config.github.host,
config.github.app_id.expect("expected an app_id"),
&config.github.app_key().expect("expected an app_key"),
Some(metrics.clone()),
)
.await
{
Ok(s) => Arc::new(s),
Err(e) => panic!("Error initiating github session: {}", e),
};
} else {
github = match github::api::GithubOauthApp::new(
&config.github.host,
config
.github
.api_token
.as_ref()
.expect("expected an api_token"),
Some(metrics.clone()),
) | Ok(s) => Arc::new(s),
Err(e) => panic!("Error initiating github session: {}", e),
};
}
let jira: Option<Arc<dyn jira::api::Session>>;
if let Some(ref jira_config) = config.jira {
jira = match JiraSession::new(jira_config, Some(metrics.clone())).await {
Ok(s) => Some(Arc::new(s)),
Err(e) => panic!("Error initiating jira session: {}", e),
};
} else {
jira = None;
}
let http_addr: SocketAddr = match config.main.listen_addr {
Some(ref addr_and_port) => addr_and_port.parse().unwrap(),
None => "0.0.0.0:3000".parse().unwrap(),
};
let ui_sessions = Arc::new(Sessions::new());
let github_handler_state = Arc::new(GithubHandlerState::new(
config.clone(),
github.clone(),
jira.clone(),
metrics.clone(),
));
let octobot = OctobotService::new(
config.clone(),
ui_sessions.clone(),
github_handler_state.clone(),
metrics.clone(),
);
let main_service = make_service_fn(move |_| {
let metrics = metrics.clone();
let _scoped_count = metrics::scoped_inc(&metrics.current_connection_count);
let octobot = octobot.clone();
async move {
// move the scoped count inside the future
let _scoped_count = _scoped_count;
let octobot = octobot.clone();
Ok::<_, hyper::Error>(service_fn(move |req| {
let octobot = octobot.clone();
octobot.call(req)
}))
}
});
let server = Server::bind(&http_addr).serve(main_service);
info!("Listening (HTTP) on {}", http_addr);
if let Err(e) = server.await {
error!("server error: {}", e);
}
} | .await
{ | random_line_split |
main.rs | use std::net::SocketAddr;
use std::sync::Arc;
use hyper::server::Server;
use hyper::service::{make_service_fn, service_fn};
use log::{error, info};
use crate::runtime;
use crate::server::github_handler::GithubHandlerState;
use crate::server::octobot_service::OctobotService;
use crate::server::sessions::Sessions;
use octobot_lib::config::Config;
use octobot_lib::github;
use octobot_lib::jira;
use octobot_lib::jira::api::JiraSession;
use octobot_lib::metrics;
pub fn | (config: Config) {
let num_http_threads = config.main.num_http_threads.unwrap_or(20);
let metrics = metrics::Metrics::new();
runtime::run(num_http_threads, metrics.clone(), async move {
run_server(config, metrics).await
});
}
async fn run_server(config: Config, metrics: Arc<metrics::Metrics>) {
let config = Arc::new(config);
let github: Arc<dyn github::api::GithubSessionFactory>;
if config.github.app_id.is_some() {
github = match github::api::GithubApp::new(
&config.github.host,
config.github.app_id.expect("expected an app_id"),
&config.github.app_key().expect("expected an app_key"),
Some(metrics.clone()),
)
.await
{
Ok(s) => Arc::new(s),
Err(e) => panic!("Error initiating github session: {}", e),
};
} else {
github = match github::api::GithubOauthApp::new(
&config.github.host,
config
.github
.api_token
.as_ref()
.expect("expected an api_token"),
Some(metrics.clone()),
)
.await
{
Ok(s) => Arc::new(s),
Err(e) => panic!("Error initiating github session: {}", e),
};
}
let jira: Option<Arc<dyn jira::api::Session>>;
if let Some(ref jira_config) = config.jira {
jira = match JiraSession::new(jira_config, Some(metrics.clone())).await {
Ok(s) => Some(Arc::new(s)),
Err(e) => panic!("Error initiating jira session: {}", e),
};
} else {
jira = None;
}
let http_addr: SocketAddr = match config.main.listen_addr {
Some(ref addr_and_port) => addr_and_port.parse().unwrap(),
None => "0.0.0.0:3000".parse().unwrap(),
};
let ui_sessions = Arc::new(Sessions::new());
let github_handler_state = Arc::new(GithubHandlerState::new(
config.clone(),
github.clone(),
jira.clone(),
metrics.clone(),
));
let octobot = OctobotService::new(
config.clone(),
ui_sessions.clone(),
github_handler_state.clone(),
metrics.clone(),
);
let main_service = make_service_fn(move |_| {
let metrics = metrics.clone();
let _scoped_count = metrics::scoped_inc(&metrics.current_connection_count);
let octobot = octobot.clone();
async move {
// move the scoped count inside the future
let _scoped_count = _scoped_count;
let octobot = octobot.clone();
Ok::<_, hyper::Error>(service_fn(move |req| {
let octobot = octobot.clone();
octobot.call(req)
}))
}
});
let server = Server::bind(&http_addr).serve(main_service);
info!("Listening (HTTP) on {}", http_addr);
if let Err(e) = server.await {
error!("server error: {}", e);
}
}
| start | identifier_name |
main.rs | use std::net::SocketAddr;
use std::sync::Arc;
use hyper::server::Server;
use hyper::service::{make_service_fn, service_fn};
use log::{error, info};
use crate::runtime;
use crate::server::github_handler::GithubHandlerState;
use crate::server::octobot_service::OctobotService;
use crate::server::sessions::Sessions;
use octobot_lib::config::Config;
use octobot_lib::github;
use octobot_lib::jira;
use octobot_lib::jira::api::JiraSession;
use octobot_lib::metrics;
pub fn start(config: Config) {
let num_http_threads = config.main.num_http_threads.unwrap_or(20);
let metrics = metrics::Metrics::new();
runtime::run(num_http_threads, metrics.clone(), async move {
run_server(config, metrics).await
});
}
async fn run_server(config: Config, metrics: Arc<metrics::Metrics>) | config
.github
.api_token
.as_ref()
.expect("expected an api_token"),
Some(metrics.clone()),
)
.await
{
Ok(s) => Arc::new(s),
Err(e) => panic!("Error initiating github session: {}", e),
};
}
let jira: Option<Arc<dyn jira::api::Session>>;
if let Some(ref jira_config) = config.jira {
jira = match JiraSession::new(jira_config, Some(metrics.clone())).await {
Ok(s) => Some(Arc::new(s)),
Err(e) => panic!("Error initiating jira session: {}", e),
};
} else {
jira = None;
}
let http_addr: SocketAddr = match config.main.listen_addr {
Some(ref addr_and_port) => addr_and_port.parse().unwrap(),
None => "0.0.0.0:3000".parse().unwrap(),
};
let ui_sessions = Arc::new(Sessions::new());
let github_handler_state = Arc::new(GithubHandlerState::new(
config.clone(),
github.clone(),
jira.clone(),
metrics.clone(),
));
let octobot = OctobotService::new(
config.clone(),
ui_sessions.clone(),
github_handler_state.clone(),
metrics.clone(),
);
let main_service = make_service_fn(move |_| {
let metrics = metrics.clone();
let _scoped_count = metrics::scoped_inc(&metrics.current_connection_count);
let octobot = octobot.clone();
async move {
// move the scoped count inside the future
let _scoped_count = _scoped_count;
let octobot = octobot.clone();
Ok::<_, hyper::Error>(service_fn(move |req| {
let octobot = octobot.clone();
octobot.call(req)
}))
}
});
let server = Server::bind(&http_addr).serve(main_service);
info!("Listening (HTTP) on {}", http_addr);
if let Err(e) = server.await {
error!("server error: {}", e);
}
}
| {
let config = Arc::new(config);
let github: Arc<dyn github::api::GithubSessionFactory>;
if config.github.app_id.is_some() {
github = match github::api::GithubApp::new(
&config.github.host,
config.github.app_id.expect("expected an app_id"),
&config.github.app_key().expect("expected an app_key"),
Some(metrics.clone()),
)
.await
{
Ok(s) => Arc::new(s),
Err(e) => panic!("Error initiating github session: {}", e),
};
} else {
github = match github::api::GithubOauthApp::new(
&config.github.host, | identifier_body |
local_ptr.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Access to a single thread-local pointer.
//!
//! The runtime will use this for storing ~Task.
//!
//! FIXME: Add runtime checks for usage of inconsistent pointer types.
//! and for overwriting an existing pointer.
#![allow(dead_code)]
use cast;
use ops::{Drop, Deref, DerefMut};
use ptr::RawPtr;
#[cfg(windows)] // mingw-w32 doesn't like thread_local things
#[cfg(target_os = "android")] // see #10686
pub use self::native::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
#[cfg(not(windows), not(target_os = "android"))]
pub use self::compiled::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
/// Encapsulates a borrowed value. When this value goes out of scope, the
/// pointer is returned.
pub struct Borrowed<T> {
val: *(),
}
#[unsafe_destructor]
impl<T> Drop for Borrowed<T> {
fn drop(&mut self) {
unsafe {
if self.val.is_null() {
rtabort!("Aiee, returning null borrowed object!");
}
let val: ~T = cast::transmute(self.val);
put::<T>(val);
rtassert!(exists());
}
}
}
impl<T> Deref<T> for Borrowed<T> {
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*(self.val as *T) }
}
}
impl<T> DerefMut<T> for Borrowed<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *(self.val as *mut T) }
}
}
/// Borrow the thread-local value from thread-local storage.
/// While the value is borrowed it is not available in TLS.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn borrow<T>() -> Borrowed<T> {
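// Take the value out of TLS and wrap it in a guard; Borrowed's Drop impl
// puts the pointer back into TLS when the guard goes out of scope.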
let val: *() = cast::transmute(take::<T>());
Borrowed {
val: val,
}
}
/// Compiled implementation of accessing the runtime local pointer. This is
/// implemented using LLVM's thread_local attribute which isn't necessarily
/// working on all platforms. This implementation is faster, however, so we use
/// it wherever possible.
#[cfg(not(windows), not(target_os = "android"))]
pub mod compiled {
use cast;
use option::{Option, Some, None};
use ptr::RawPtr;
#[cfg(test)]
pub use realstd::rt::shouldnt_be_public::RT_TLS_PTR;
#[cfg(not(test))]
#[thread_local]
pub static mut RT_TLS_PTR: *mut u8 = 0 as *mut u8;
pub fn init() {}
pub unsafe fn cleanup() {}
// Rationale for all of these functions being inline(never)
//
// The #[thread_local] annotation gets propagated all the way through to
// LLVM, meaning the global is specially treated by LLVM to lower it to an
// efficient sequence of instructions. This also involves dealing with fun
// stuff in object files and whatnot. Regardless, it turns out this causes
// trouble with green threads and lots of optimizations turned on. The
// following case study was done on linux x86_64, but I would imagine that
// other platforms are similar.
//
// On linux, the instruction sequence for loading the tls pointer global
// looks like:
//
// mov %fs:0x0, %rax
// mov -0x8(%rax), %rbx
//
// This code leads me to believe that (%fs:0x0) is a table, and then the
// table contains the TLS values for the process. Hence, the slot at offset
// -0x8 is the task TLS pointer. This leads us to the conclusion that this
// table is the actual thread local part of each thread. The kernel sets up
// the fs segment selector to point at the right region of memory for each
// thread.
//
// Optimizations lead me to believe that this code is lowered to these
// instructions in the LLVM codegen passes, because you'll see code like
// this when everything is optimized:
//
// mov %fs:0x0, %r14
// mov -0x8(%r14), %rbx
// // do something with %rbx, the rust Task pointer
//
// ... // <- do more things
//
// mov -0x8(%r14), %rbx
// // do something else with %rbx
//
// Note that the optimization done here is that the first load is not
// duplicated during the lower instructions. This means that the %fs:0x0
// memory location is only dereferenced once.
//
// Normally, this is actually a good thing! With green threads, however,
// it's very possible for the code labeled "do more things" to context
// switch to another thread. If this happens, then we *must* re-load %fs:0x0
// because it's changed (we're on a different thread). If we don't re-load
// the table location, then we'll be reading the original thread's TLS
// values, not our thread's TLS values.
//
// Hence, we never inline these functions. By never inlining, we're
// guaranteed that loading the table is a local decision which is forced to
// *always* happen.
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn put<T>(sched: ~T) {
RT_TLS_PTR = cast::transmute(sched)
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn take<T>() -> ~T {
let ptr = RT_TLS_PTR;
rtassert!(!ptr.is_null());
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
ptr
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn try_take<T>() -> Option<~T> {
let ptr = RT_TLS_PTR;
if ptr.is_null() {
None
} else |
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline(never)] // see comments above
pub unsafe fn unsafe_take<T>() -> ~T {
cast::transmute(RT_TLS_PTR)
}
/// Check whether there is a thread-local pointer installed.
#[inline(never)] // see comments above
pub fn exists() -> bool {
unsafe {
RT_TLS_PTR.is_not_null()
}
}
#[inline(never)] // see comments above
pub unsafe fn unsafe_borrow<T>() -> *mut T {
if RT_TLS_PTR.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
RT_TLS_PTR as *mut T
}
#[inline(never)] // see comments above
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
if RT_TLS_PTR.is_null() {
None
} else {
Some(RT_TLS_PTR as *mut T)
}
}
}
/// Native implementation of having the runtime thread-local pointer. This
/// implementation uses the `thread_local_storage` module to provide a
/// thread-local value.
pub mod native {
use cast;
use option::{Option, Some, None};
use ptr;
use ptr::RawPtr;
use tls = rt::thread_local_storage;
static mut RT_TLS_KEY: tls::Key = -1;
/// Initialize the TLS key. Other ops will fail if this isn't executed
/// first.
pub fn init() {
unsafe {
tls::create(&mut RT_TLS_KEY);
}
}
pub unsafe fn cleanup() {
rtassert!(RT_TLS_KEY != -1);
tls::destroy(RT_TLS_KEY);
}
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn put<T>(sched: ~T) {
let key = tls_key();
let void_ptr: *mut u8 = cast::transmute(sched);
tls::set(key, void_ptr);
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn take<T>() -> ~T {
let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
return ptr;
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn try_take<T>() -> Option<~T> {
match maybe_tls_key() {
Some(key) => {
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
None
} else {
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
Some(ptr)
}
}
None => None
}
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline]
pub unsafe fn unsafe_take<T>() -> ~T {
let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
return ptr;
}
/// Check whether there is a thread-local pointer installed.
pub fn exists() -> bool {
unsafe {
match maybe_tls_key() {
Some(key) => tls::get(key).is_not_null(),
None => false
}
}
}
/// Borrow a mutable reference to the thread-local value
///
/// # Safety Note
///
/// Because this leaves the value in thread-local storage it is possible
/// for the Scheduler pointer to be aliased
pub unsafe fn unsafe_borrow<T>() -> *mut T {
let key = tls_key();
let void_ptr = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
void_ptr as *mut T
}
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
match maybe_tls_key() {
Some(key) => {
let void_ptr = tls::get(key);
if void_ptr.is_null() {
None
} else {
Some(void_ptr as *mut T)
}
}
None => None
}
}
#[inline]
fn tls_key() -> tls::Key {
match maybe_tls_key() {
Some(key) => key,
None => rtabort!("runtime tls key not initialized")
}
}
#[inline]
#[cfg(not(test))]
#[allow(visible_private_types)]
pub fn maybe_tls_key() -> Option<tls::Key> {
unsafe {
// NB: This is a little racy because, while the key is
// initialized under a mutex and it's assumed to be initialized
// in the Scheduler ctor by any thread that needs to use it,
// we are not accessing the key under a mutex. Threads that
// are not using the new Scheduler but still *want to check*
// whether they are running under a new Scheduler may see a 0
// value here that is in the process of being initialized in
// another thread. I think this is fine since the only action
// they could take if it was initialized would be to check the
// thread-local value and see that it's not set.
if RT_TLS_KEY != -1 {
return Some(RT_TLS_KEY);
} else {
return None;
}
}
}
#[inline] #[cfg(test)]
pub fn maybe_tls_key() -> Option<tls::Key> {
use realstd;
unsafe {
cast::transmute(realstd::rt::shouldnt_be_public::maybe_tls_key())
}
}
}
| {
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
Some(ptr)
} | conditional_block |
local_ptr.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Access to a single thread-local pointer.
//!
//! The runtime will use this for storing ~Task.
//!
//! FIXME: Add runtime checks for usage of inconsistent pointer types.
//! and for overwriting an existing pointer.
#![allow(dead_code)]
use cast;
use ops::{Drop, Deref, DerefMut};
use ptr::RawPtr;
#[cfg(windows)] // mingw-w32 doesn't like thread_local things
#[cfg(target_os = "android")] // see #10686
pub use self::native::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
#[cfg(not(windows), not(target_os = "android"))]
pub use self::compiled::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
/// Encapsulates a borrowed value. When this value goes out of scope, the
/// pointer is returned.
pub struct Borrowed<T> {
val: *(),
}
#[unsafe_destructor]
impl<T> Drop for Borrowed<T> {
fn drop(&mut self) {
unsafe {
if self.val.is_null() {
rtabort!("Aiee, returning null borrowed object!");
}
let val: ~T = cast::transmute(self.val);
put::<T>(val);
rtassert!(exists());
}
}
}
impl<T> Deref<T> for Borrowed<T> {
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*(self.val as *T) }
}
}
impl<T> DerefMut<T> for Borrowed<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *(self.val as *mut T) }
}
}
/// Borrow the thread-local value from thread-local storage.
/// While the value is borrowed it is not available in TLS.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn borrow<T>() -> Borrowed<T> {
let val: *() = cast::transmute(take::<T>());
Borrowed {
val: val,
}
}
/// Compiled implementation of accessing the runtime local pointer. This is
/// implemented using LLVM's thread_local attribute which isn't necessarily
/// working on all platforms. This implementation is faster, however, so we use
/// it wherever possible.
#[cfg(not(windows), not(target_os = "android"))]
pub mod compiled {
use cast;
use option::{Option, Some, None};
use ptr::RawPtr;
#[cfg(test)]
pub use realstd::rt::shouldnt_be_public::RT_TLS_PTR;
#[cfg(not(test))]
#[thread_local]
pub static mut RT_TLS_PTR: *mut u8 = 0 as *mut u8;
pub fn init() {}
pub unsafe fn cleanup() {}
// Rationale for all of these functions being inline(never)
//
// The #[thread_local] annotation gets propagated all the way through to
// LLVM, meaning the global is specially treated by LLVM to lower it to an
// efficient sequence of instructions. This also involves dealing with fun
// stuff in object files and whatnot. Regardless, it turns out this causes
// trouble with green threads and lots of optimizations turned on. The
// following case study was done on linux x86_64, but I would imagine that
// other platforms are similar.
//
// On linux, the instruction sequence for loading the tls pointer global
// looks like:
//
// mov %fs:0x0, %rax
// mov -0x8(%rax), %rbx
//
// This code leads me to believe that (%fs:0x0) is a table, and then the
// table contains the TLS values for the process. Hence, the slot at offset
// -0x8 is the task TLS pointer. This leads us to the conclusion that this
// table is the actual thread local part of each thread. The kernel sets up
// the fs segment selector to point at the right region of memory for each
// thread.
//
// Optimizations lead me to believe that this code is lowered to these
// instructions in the LLVM codegen passes, because you'll see code like
// this when everything is optimized:
//
// mov %fs:0x0, %r14
// mov -0x8(%r14), %rbx
// // do something with %rbx, the rust Task pointer
//
// ... // <- do more things
//
// mov -0x8(%r14), %rbx
// // do something else with %rbx
//
// Note that the optimization done here is that the first load is not
// duplicated during the lower instructions. This means that the %fs:0x0
// memory location is only dereferenced once.
//
// Normally, this is actually a good thing! With green threads, however,
// it's very possible for the code labeled "do more things" to context
// switch to another thread. If this happens, then we *must* re-load %fs:0x0
// because it's changed (we're on a different thread). If we don't re-load
// the table location, then we'll be reading the original thread's TLS
// values, not our thread's TLS values.
//
// Hence, we never inline these functions. By never inlining, we're
// guaranteed that loading the table is a local decision which is forced to
// *always* happen.
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn put<T>(sched: ~T) {
RT_TLS_PTR = cast::transmute(sched)
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn take<T>() -> ~T {
let ptr = RT_TLS_PTR;
rtassert!(!ptr.is_null());
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
ptr
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn try_take<T>() -> Option<~T> {
let ptr = RT_TLS_PTR;
if ptr.is_null() {
None
} else {
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
Some(ptr)
}
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline(never)] // see comments above
pub unsafe fn | <T>() -> ~T {
cast::transmute(RT_TLS_PTR)
}
/// Check whether there is a thread-local pointer installed.
#[inline(never)] // see comments above
pub fn exists() -> bool {
unsafe {
RT_TLS_PTR.is_not_null()
}
}
#[inline(never)] // see comments above
pub unsafe fn unsafe_borrow<T>() -> *mut T {
if RT_TLS_PTR.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
RT_TLS_PTR as *mut T
}
#[inline(never)] // see comments above
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
if RT_TLS_PTR.is_null() {
None
} else {
Some(RT_TLS_PTR as *mut T)
}
}
}
/// Native implementation of having the runtime thread-local pointer. This
/// implementation uses the `thread_local_storage` module to provide a
/// thread-local value.
pub mod native {
use cast;
use option::{Option, Some, None};
use ptr;
use ptr::RawPtr;
use tls = rt::thread_local_storage;
static mut RT_TLS_KEY: tls::Key = -1;
/// Initialize the TLS key. Other ops will fail if this isn't executed
/// first.
pub fn init() {
unsafe {
tls::create(&mut RT_TLS_KEY);
}
}
pub unsafe fn cleanup() {
rtassert!(RT_TLS_KEY != -1);
tls::destroy(RT_TLS_KEY);
}
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn put<T>(sched: ~T) {
let key = tls_key();
let void_ptr: *mut u8 = cast::transmute(sched);
tls::set(key, void_ptr);
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn take<T>() -> ~T {
let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
return ptr;
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn try_take<T>() -> Option<~T> {
match maybe_tls_key() {
Some(key) => {
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
None
} else {
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
Some(ptr)
}
}
None => None
}
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline]
pub unsafe fn unsafe_take<T>() -> ~T {
let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
return ptr;
}
/// Check whether there is a thread-local pointer installed.
pub fn exists() -> bool {
unsafe {
match maybe_tls_key() {
Some(key) => tls::get(key).is_not_null(),
None => false
}
}
}
/// Borrow a mutable reference to the thread-local value
///
/// # Safety Note
///
/// Because this leaves the value in thread-local storage it is possible
/// for the Scheduler pointer to be aliased
pub unsafe fn unsafe_borrow<T>() -> *mut T {
let key = tls_key();
let void_ptr = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
void_ptr as *mut T
}
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
match maybe_tls_key() {
Some(key) => {
let void_ptr = tls::get(key);
if void_ptr.is_null() {
None
} else {
Some(void_ptr as *mut T)
}
}
None => None
}
}
#[inline]
fn tls_key() -> tls::Key {
match maybe_tls_key() {
Some(key) => key,
None => rtabort!("runtime tls key not initialized")
}
}
#[inline]
#[cfg(not(test))]
#[allow(visible_private_types)]
pub fn maybe_tls_key() -> Option<tls::Key> {
unsafe {
// NB: This is a little racy because, while the key is
// initialized under a mutex and it's assumed to be initialized
// in the Scheduler ctor by any thread that needs to use it,
// we are not accessing the key under a mutex. Threads that
// are not using the new Scheduler but still *want to check*
// whether they are running under a new Scheduler may see a 0
// value here that is in the process of being initialized in
// another thread. I think this is fine since the only action
// they could take if it was initialized would be to check the
// thread-local value and see that it's not set.
if RT_TLS_KEY != -1 {
return Some(RT_TLS_KEY);
} else {
return None;
}
}
}
#[inline] #[cfg(test)]
pub fn maybe_tls_key() -> Option<tls::Key> {
use realstd;
unsafe {
cast::transmute(realstd::rt::shouldnt_be_public::maybe_tls_key())
}
}
}
| unsafe_take | identifier_name |
local_ptr.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Access to a single thread-local pointer.
//!
//! The runtime will use this for storing ~Task.
//!
//! FIXME: Add runtime checks for usage of inconsistent pointer types.
//! and for overwriting an existing pointer.
#![allow(dead_code)]
use cast;
use ops::{Drop, Deref, DerefMut};
use ptr::RawPtr;
#[cfg(windows)] // mingw-w32 doesn't like thread_local things
#[cfg(target_os = "android")] // see #10686
pub use self::native::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
#[cfg(not(windows), not(target_os = "android"))]
pub use self::compiled::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
/// Encapsulates a borrowed value. When this value goes out of scope, the
/// pointer is returned.
pub struct Borrowed<T> {
val: *(),
}
#[unsafe_destructor]
impl<T> Drop for Borrowed<T> {
fn drop(&mut self) {
unsafe {
if self.val.is_null() {
rtabort!("Aiee, returning null borrowed object!");
}
let val: ~T = cast::transmute(self.val);
put::<T>(val);
rtassert!(exists());
}
}
}
impl<T> Deref<T> for Borrowed<T> {
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*(self.val as *T) }
}
}
impl<T> DerefMut<T> for Borrowed<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *(self.val as *mut T) }
}
}
/// Borrow the thread-local value from thread-local storage.
/// While the value is borrowed it is not available in TLS.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn borrow<T>() -> Borrowed<T> {
let val: *() = cast::transmute(take::<T>());
Borrowed {
val: val,
}
}
/// Compiled implementation of accessing the runtime local pointer. This is
/// implemented using LLVM's thread_local attribute which isn't necessarily
/// working on all platforms. This implementation is faster, however, so we use
/// it wherever possible.
#[cfg(not(windows), not(target_os = "android"))]
pub mod compiled {
use cast;
use option::{Option, Some, None};
use ptr::RawPtr;
#[cfg(test)]
pub use realstd::rt::shouldnt_be_public::RT_TLS_PTR;
#[cfg(not(test))]
#[thread_local]
pub static mut RT_TLS_PTR: *mut u8 = 0 as *mut u8;
pub fn init() {}
pub unsafe fn cleanup() {}
// Rationale for all of these functions being inline(never)
//
// The #[thread_local] annotation gets propagated all the way through to
// LLVM, meaning the global is specially treated by LLVM to lower it to an
// efficient sequence of instructions. This also involves dealing with fun
// stuff in object files and whatnot. Regardless, it turns out this causes
// trouble with green threads and lots of optimizations turned on. The
// following case study was done on linux x86_64, but I would imagine that
// other platforms are similar.
//
// On linux, the instruction sequence for loading the tls pointer global
// looks like:
//
// mov %fs:0x0, %rax
// mov -0x8(%rax), %rbx
//
// This code leads me to believe that (%fs:0x0) is a table, and then the
// table contains the TLS values for the process. Hence, the slot at offset
// -0x8 is the task TLS pointer. This leads us to the conclusion that this
// table is the actual thread local part of each thread. The kernel sets up
// the fs segment selector to point at the right region of memory for each
// thread.
//
// Optimizations lead me to believe that this code is lowered to these
// instructions in the LLVM codegen passes, because you'll see code like
// this when everything is optimized:
//
// mov %fs:0x0, %r14
// mov -0x8(%r14), %rbx
// // do something with %rbx, the rust Task pointer
//
// ... // <- do more things
//
// mov -0x8(%r14), %rbx
// // do something else with %rbx
//
// Note that the optimization done here is that the first load is not
// duplicated during the lower instructions. This means that the %fs:0x0
// memory location is only dereferenced once.
//
// Normally, this is actually a good thing! With green threads, however,
// it's very possible for the code labeled "do more things" to context
// switch to another thread. If this happens, then we *must* re-load %fs:0x0
// because it's changed (we're on a different thread). If we don't re-load
// the table location, then we'll be reading the original thread's TLS
// values, not our thread's TLS values.
//
// Hence, we never inline these functions. By never inlining, we're
// guaranteed that loading the table is a local decision which is forced to
// *always* happen.
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn put<T>(sched: ~T) {
RT_TLS_PTR = cast::transmute(sched)
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn take<T>() -> ~T {
let ptr = RT_TLS_PTR;
rtassert!(!ptr.is_null());
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
ptr
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn try_take<T>() -> Option<~T> {
let ptr = RT_TLS_PTR;
if ptr.is_null() {
None
} else {
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
Some(ptr)
}
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline(never)] // see comments above
pub unsafe fn unsafe_take<T>() -> ~T {
cast::transmute(RT_TLS_PTR)
}
/// Check whether there is a thread-local pointer installed.
#[inline(never)] // see comments above
pub fn exists() -> bool {
unsafe {
RT_TLS_PTR.is_not_null()
}
}
#[inline(never)] // see comments above
pub unsafe fn unsafe_borrow<T>() -> *mut T {
if RT_TLS_PTR.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
RT_TLS_PTR as *mut T
}
#[inline(never)] // see comments above
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
if RT_TLS_PTR.is_null() {
None
} else {
Some(RT_TLS_PTR as *mut T)
}
}
}
/// Native implementation of having the runtime thread-local pointer. This
/// implementation uses the `thread_local_storage` module to provide a
/// thread-local value.
pub mod native {
use cast;
use option::{Option, Some, None};
use ptr;
use ptr::RawPtr;
use tls = rt::thread_local_storage;
static mut RT_TLS_KEY: tls::Key = -1;
/// Initialize the TLS key. Other ops will fail if this isn't executed
/// first.
pub fn init() {
unsafe {
tls::create(&mut RT_TLS_KEY);
}
}
pub unsafe fn cleanup() {
rtassert!(RT_TLS_KEY != -1);
tls::destroy(RT_TLS_KEY);
}
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn put<T>(sched: ~T) {
let key = tls_key();
let void_ptr: *mut u8 = cast::transmute(sched);
tls::set(key, void_ptr);
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn take<T>() -> ~T {
let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
return ptr;
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn try_take<T>() -> Option<~T> |
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline]
pub unsafe fn unsafe_take<T>() -> ~T {
let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
return ptr;
}
/// Check whether there is a thread-local pointer installed.
pub fn exists() -> bool {
unsafe {
match maybe_tls_key() {
Some(key) => tls::get(key).is_not_null(),
None => false
}
}
}
/// Borrow a mutable reference to the thread-local value
///
/// # Safety Note
///
/// Because this leaves the value in thread-local storage it is possible
/// for the Scheduler pointer to be aliased
pub unsafe fn unsafe_borrow<T>() -> *mut T {
let key = tls_key();
let void_ptr = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
void_ptr as *mut T
}
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
match maybe_tls_key() {
Some(key) => {
let void_ptr = tls::get(key);
if void_ptr.is_null() {
None
} else {
Some(void_ptr as *mut T)
}
}
None => None
}
}
#[inline]
fn tls_key() -> tls::Key {
match maybe_tls_key() {
Some(key) => key,
None => rtabort!("runtime tls key not initialized")
}
}
#[inline]
#[cfg(not(test))]
#[allow(visible_private_types)]
pub fn maybe_tls_key() -> Option<tls::Key> {
unsafe {
// NB: This is a little racy because, while the key is
// initialized under a mutex and it's assumed to be initialized
// in the Scheduler ctor by any thread that needs to use it,
// we are not accessing the key under a mutex. Threads that
// are not using the new Scheduler but still *want to check*
// whether they are running under a new Scheduler may see a 0
// value here that is in the process of being initialized in
// another thread. I think this is fine since the only action
// they could take if it was initialized would be to check the
// thread-local value and see that it's not set.
if RT_TLS_KEY != -1 {
return Some(RT_TLS_KEY);
} else {
return None;
}
}
}
#[inline] #[cfg(test)]
pub fn maybe_tls_key() -> Option<tls::Key> {
use realstd;
unsafe {
cast::transmute(realstd::rt::shouldnt_be_public::maybe_tls_key())
}
}
}
| {
match maybe_tls_key() {
Some(key) => {
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
None
} else {
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
Some(ptr)
}
}
None => None
}
} | identifier_body |
local_ptr.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Access to a single thread-local pointer.
//!
//! The runtime will use this for storing ~Task.
//!
//! FIXME: Add runtime checks for usage of inconsistent pointer types.
//! and for overwriting an existing pointer.
#![allow(dead_code)]
use cast;
use ops::{Drop, Deref, DerefMut};
use ptr::RawPtr;
#[cfg(windows)] // mingw-w32 doesn't like thread_local things
#[cfg(target_os = "android")] // see #10686
pub use self::native::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
#[cfg(not(windows), not(target_os = "android"))]
pub use self::compiled::{init, cleanup, put, take, try_take, unsafe_take, exists,
unsafe_borrow, try_unsafe_borrow};
/// Encapsulates a borrowed value. When this value goes out of scope, the
/// pointer is returned.
pub struct Borrowed<T> {
val: *(),
}
#[unsafe_destructor]
impl<T> Drop for Borrowed<T> {
fn drop(&mut self) {
unsafe {
if self.val.is_null() {
rtabort!("Aiee, returning null borrowed object!");
}
let val: ~T = cast::transmute(self.val);
put::<T>(val);
rtassert!(exists());
}
}
}
impl<T> Deref<T> for Borrowed<T> {
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*(self.val as *T) }
}
}
impl<T> DerefMut<T> for Borrowed<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *(self.val as *mut T) }
}
}
/// Borrow the thread-local value from thread-local storage.
/// While the value is borrowed it is not available in TLS.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn borrow<T>() -> Borrowed<T> {
let val: *() = cast::transmute(take::<T>());
Borrowed {
val: val,
}
}
/// Compiled implementation of accessing the runtime local pointer. This is
/// implemented using LLVM's thread_local attribute which isn't necessarily
/// working on all platforms. This implementation is faster, however, so we use
/// it wherever possible.
#[cfg(not(windows), not(target_os = "android"))]
pub mod compiled {
use cast;
use option::{Option, Some, None};
use ptr::RawPtr;
#[cfg(test)]
pub use realstd::rt::shouldnt_be_public::RT_TLS_PTR;
#[cfg(not(test))]
#[thread_local]
pub static mut RT_TLS_PTR: *mut u8 = 0 as *mut u8;
pub fn init() {}
pub unsafe fn cleanup() {}
// Rationale for all of these functions being inline(never)
//
// The #[thread_local] annotation gets propagated all the way through to
// LLVM, meaning the global is specially treated by LLVM to lower it to an
// efficient sequence of instructions. This also involves dealing with fun
// stuff in object files and whatnot. Regardless, it turns out this causes
// trouble with green threads and lots of optimizations turned on. The
// following case study was done on linux x86_64, but I would imagine that
// other platforms are similar.
//
// On linux, the instruction sequence for loading the tls pointer global
// looks like:
//
// mov %fs:0x0, %rax
// mov -0x8(%rax), %rbx
//
// This code leads me to believe that (%fs:0x0) is a table, and then the
// table contains the TLS values for the process. Hence, the slot at offset
// -0x8 is the task TLS pointer. This leads us to the conclusion that this
// table is the actual thread local part of each thread. The kernel sets up
// the fs segment selector to point at the right region of memory for each
// thread.
//
// Optimizations lead me to believe that this code is lowered to these
// instructions in the LLVM codegen passes, because you'll see code like
// this when everything is optimized:
//
// mov %fs:0x0, %r14
// mov -0x8(%r14), %rbx
// // do something with %rbx, the rust Task pointer
//
// ... // <- do more things
//
// mov -0x8(%r14), %rbx
// // do something else with %rbx
//
// Note that the optimization done here is that the first load is not
// duplicated during the lower instructions. This means that the %fs:0x0
// memory location is only dereferenced once.
//
// Normally, this is actually a good thing! With green threads, however,
// it's very possible for the code labeled "do more things" to context
// switch to another thread. If this happens, then we *must* re-load %fs:0x0
// because it's changed (we're on a different thread). If we don't re-load
// the table location, then we'll be reading the original thread's TLS
// values, not our thread's TLS values.
//
// Hence, we never inline these functions. By never inlining, we're
// guaranteed that loading the table is a local decision which is forced to
// *always* happen.
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn put<T>(sched: ~T) {
RT_TLS_PTR = cast::transmute(sched)
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn take<T>() -> ~T {
let ptr = RT_TLS_PTR;
rtassert!(!ptr.is_null());
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
ptr
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline(never)] // see comments above
pub unsafe fn try_take<T>() -> Option<~T> {
let ptr = RT_TLS_PTR;
if ptr.is_null() {
None
} else {
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
Some(ptr)
}
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline(never)] // see comments above
pub unsafe fn unsafe_take<T>() -> ~T {
cast::transmute(RT_TLS_PTR)
}
/// Check whether there is a thread-local pointer installed.
#[inline(never)] // see comments above
pub fn exists() -> bool {
unsafe {
RT_TLS_PTR.is_not_null()
}
}
#[inline(never)] // see comments above
pub unsafe fn unsafe_borrow<T>() -> *mut T {
if RT_TLS_PTR.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
RT_TLS_PTR as *mut T
}
#[inline(never)] // see comments above
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
if RT_TLS_PTR.is_null() {
None
} else {
Some(RT_TLS_PTR as *mut T)
}
}
}
/// Native implementation of having the runtime thread-local pointer. This
/// implementation uses the `thread_local_storage` module to provide a
/// thread-local value.
pub mod native {
use cast;
use option::{Option, Some, None};
use ptr;
use ptr::RawPtr;
use tls = rt::thread_local_storage;
static mut RT_TLS_KEY: tls::Key = -1;
/// Initialize the TLS key. Other ops will fail if this isn't executed
/// first.
pub fn init() {
unsafe {
tls::create(&mut RT_TLS_KEY);
}
}
pub unsafe fn cleanup() {
rtassert!(RT_TLS_KEY != -1);
tls::destroy(RT_TLS_KEY);
}
/// Give a pointer to thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn put<T>(sched: ~T) {
let key = tls_key();
let void_ptr: *mut u8 = cast::transmute(sched);
tls::set(key, void_ptr);
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn take<T>() -> ~T {
let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
return ptr;
}
/// Optionally take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
#[inline]
pub unsafe fn try_take<T>() -> Option<~T> {
match maybe_tls_key() {
Some(key) => {
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
None
} else {
let ptr: ~T = cast::transmute(void_ptr);
tls::set(key, ptr::mut_null());
Some(ptr)
}
}
None => None
}
}
/// Take ownership of a pointer from thread-local storage.
///
/// # Safety note
///
/// Does not validate the pointer type.
/// Leaves the old pointer in TLS for speed.
#[inline] | let key = tls_key();
let void_ptr: *mut u8 = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: ~T = cast::transmute(void_ptr);
return ptr;
}
/// Check whether there is a thread-local pointer installed.
pub fn exists() -> bool {
unsafe {
match maybe_tls_key() {
Some(key) => tls::get(key).is_not_null(),
None => false
}
}
}
/// Borrow a mutable reference to the thread-local value
///
/// # Safety Note
///
/// Because this leaves the value in thread-local storage it is possible
/// for the Scheduler pointer to be aliased
pub unsafe fn unsafe_borrow<T>() -> *mut T {
let key = tls_key();
let void_ptr = tls::get(key);
if void_ptr.is_null() {
rtabort!("thread-local pointer is null. bogus!");
}
void_ptr as *mut T
}
pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
match maybe_tls_key() {
Some(key) => {
let void_ptr = tls::get(key);
if void_ptr.is_null() {
None
} else {
Some(void_ptr as *mut T)
}
}
None => None
}
}
#[inline]
fn tls_key() -> tls::Key {
match maybe_tls_key() {
Some(key) => key,
None => rtabort!("runtime tls key not initialized")
}
}
#[inline]
#[cfg(not(test))]
#[allow(visible_private_types)]
pub fn maybe_tls_key() -> Option<tls::Key> {
unsafe {
// NB: This is a little racy because, while the key is
// initialized under a mutex and it's assumed to be initialized
// in the Scheduler ctor by any thread that needs to use it,
// we are not accessing the key under a mutex. Threads that
// are not using the new Scheduler but still *want to check*
// whether they are running under a new Scheduler may see a 0
// value here that is in the process of being initialized in
// another thread. I think this is fine since the only action
// they could take if it was initialized would be to check the
// thread-local value and see that it's not set.
if RT_TLS_KEY != -1 {
return Some(RT_TLS_KEY);
} else {
return None;
}
}
}
#[inline] #[cfg(test)]
pub fn maybe_tls_key() -> Option<tls::Key> {
use realstd;
unsafe {
cast::transmute(realstd::rt::shouldnt_be_public::maybe_tls_key())
}
}
} | pub unsafe fn unsafe_take<T>() -> ~T { | random_line_split |
mod.rs | mod arrays;
mod strings;
use self::strings::unescape;
pub use self::{arrays::ArrayMethod, strings::StringMethod};
use super::Expander;
use crate::{parser::lexers::ArgumentSplitter, types};
use thiserror::Error;
#[derive(Debug, PartialEq, Clone)]
pub enum Pattern<'a> {
StringPattern(&'a str),
Whitespace,
}
#[derive(Debug)]
pub struct MethodArgs<'a, 'b, E: Expander> {
args: &'a str, | ///
/// Ex: `$join($scalar)` (can't join a scalar) or `$unknown(@variable)` (unknown method)
#[derive(Debug, Clone, Error)]
pub enum MethodError {
/// Unknown array method
#[error("'{0}' is an unknown array method")]
InvalidArrayMethod(String),
/// Unknown scalar method
#[error("'{0}' is an unknown string method")]
InvalidScalarMethod(String),
/// A wrong argument was given to the method (extra, missing, or wrong type)
#[error("{0}: {1}")]
WrongArgument(&'static str, &'static str),
/// An invalid regex was provided. This is specific to the `matches` method
#[error("regex_replace: error in regular expression '{0}': {1}")]
InvalidRegex(String, #[source] regex::Error),
}
impl<'a, 'b, E: 'b + Expander> MethodArgs<'a, 'b, E> {
pub fn array(&mut self) -> impl Iterator<Item = types::Str> + '_ {
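// Split the raw argument text into shell-style words, expand each one, and
// unescape the results; words that fail to expand contribute no items.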
let expand = &mut (*self.expand);
ArgumentSplitter::new(self.args)
.flat_map(move |x| expand.expand_string(x).unwrap_or_else(|_| types::Args::new()))
.map(|s| unescape(&s))
}
pub fn join(self, pattern: &str) -> super::Result<types::Str, E::Error> {
Ok(unescape(&self.expand.expand_string(self.args)?.join(pattern)))
}
pub fn new(args: &'a str, expand: &'b mut E) -> MethodArgs<'a, 'b, E> {
MethodArgs { args, expand }
}
} | expand: &'b mut E,
}
/// Error during method expansion | random_line_split |
mod.rs | mod arrays;
mod strings;
use self::strings::unescape;
pub use self::{arrays::ArrayMethod, strings::StringMethod};
use super::Expander;
use crate::{parser::lexers::ArgumentSplitter, types};
use thiserror::Error;
#[derive(Debug, PartialEq, Clone)]
pub enum Pattern<'a> {
StringPattern(&'a str),
Whitespace,
}
#[derive(Debug)]
pub struct MethodArgs<'a, 'b, E: Expander> {
args: &'a str,
expand: &'b mut E,
}
/// Error during method expansion
///
/// Ex: `$join($scalar)` (can't join a scalar) or `$unknown(@variable)` (unknown method)
#[derive(Debug, Clone, Error)]
pub enum MethodError {
/// Unknown array method
#[error("'{0}' is an unknown array method")]
InvalidArrayMethod(String),
/// Unknown scalar method
#[error("'{0}' is an unknown string method")]
InvalidScalarMethod(String),
/// A wrong argument was given to the method (extra, missing, or wrong type)
#[error("{0}: {1}")]
WrongArgument(&'static str, &'static str),
/// An invalid regex was provided. This is specific to the `matches` method
#[error("regex_replace: error in regular expression '{0}': {1}")]
InvalidRegex(String, #[source] regex::Error),
}
impl<'a, 'b, E: 'b + Expander> MethodArgs<'a, 'b, E> {
pub fn array(&mut self) -> impl Iterator<Item = types::Str> + '_ {
let expand = &mut (*self.expand);
ArgumentSplitter::new(self.args)
.flat_map(move |x| expand.expand_string(x).unwrap_or_else(|_| types::Args::new()))
.map(|s| unescape(&s))
}
pub fn join(self, pattern: &str) -> super::Result<types::Str, E::Error> {
Ok(unescape(&self.expand.expand_string(self.args)?.join(pattern)))
}
pub fn new(args: &'a str, expand: &'b mut E) -> MethodArgs<'a, 'b, E> |
}
| {
MethodArgs { args, expand }
} | identifier_body |
mod.rs | mod arrays;
mod strings;
use self::strings::unescape;
pub use self::{arrays::ArrayMethod, strings::StringMethod};
use super::Expander;
use crate::{parser::lexers::ArgumentSplitter, types};
use thiserror::Error;
#[derive(Debug, PartialEq, Clone)]
pub enum Pattern<'a> {
StringPattern(&'a str),
Whitespace,
}
#[derive(Debug)]
pub struct MethodArgs<'a, 'b, E: Expander> {
args: &'a str,
expand: &'b mut E,
}
/// Error during method expansion
///
/// Ex: `$join($scalar)` (can't join a scalar) or `$unknown(@variable)` (unknown method)
#[derive(Debug, Clone, Error)]
pub enum MethodError {
/// Unknown array method
#[error("'{0}' is an unknown array method")]
InvalidArrayMethod(String),
/// Unknown scalar method
#[error("'{0}' is an unknown string method")]
InvalidScalarMethod(String),
    /// A wrong argument was given to the method (extra, missing, or wrong type)
#[error("{0}: {1}")]
WrongArgument(&'static str, &'static str),
/// An invalid regex was provided. This is specific to the `matches` method
#[error("regex_replace: error in regular expression '{0}': {1}")]
InvalidRegex(String, #[source] regex::Error),
}
impl<'a, 'b, E: 'b + Expander> MethodArgs<'a, 'b, E> {
pub fn array(&mut self) -> impl Iterator<Item = types::Str> + '_ {
let expand = &mut (*self.expand);
ArgumentSplitter::new(self.args)
.flat_map(move |x| expand.expand_string(x).unwrap_or_else(|_| types::Args::new()))
.map(|s| unescape(&s))
}
pub fn | (self, pattern: &str) -> super::Result<types::Str, E::Error> {
Ok(unescape(&self.expand.expand_string(self.args)?.join(pattern)))
}
pub fn new(args: &'a str, expand: &'b mut E) -> MethodArgs<'a, 'b, E> {
MethodArgs { args, expand }
}
}
| join | identifier_name |
main.rs | #![allow(unused_variables)]
fn | () {
// Rust let bindings are immutable by default.
let z = 3;
// This will raise a compiler error:
// z += 2; //~ ERROR cannot assign twice to immutable variable `z`
// You must declare a variable mutable explicitly:
let mut x = 3;
// Similarly, references are immutable by default e.g.
// The following lines would raise a compiler error. Even though x is mutable, y is an
// immutable reference.
// let y = &x;
// *y += 2; //~ ERROR cannot borrow `x` as mutable because it is also borrowed as immutable
let y = &mut x;
*y += 2; // Works
// Note that though y is now a mutable reference, y itself is still immutable e.g.
// let mut z = 5;
// y = &mut z; //~ ERROR re-assignment of immutable variable `y`
}
| main | identifier_name |
main.rs | #![allow(unused_variables)]
fn main() {
// Rust let bindings are immutable by default.
let z = 3;
// This will raise a compiler error:
// z += 2; //~ ERROR cannot assign twice to immutable variable `z`
// You must declare a variable mutable explicitly:
let mut x = 3;
// Similarly, references are immutable by default e.g.
// The following lines would raise a compiler error. Even though x is mutable, y is an
// immutable reference.
// let y = &x;
// *y += 2; //~ ERROR cannot borrow `x` as mutable because it is also borrowed as immutable
let y = &mut x;
*y += 2; // Works
// Note that though y is now a mutable reference, y itself is still immutable e.g.
// let mut z = 5;
// y = &mut z; //~ ERROR re-assignment of immutable variable `y` | } | random_line_split |
|
main.rs | #![allow(unused_variables)]
fn main() | // y = &mut z; //~ ERROR re-assignment of immutable variable `y`
}
| {
// Rust let bindings are immutable by default.
let z = 3;
// This will raise a compiler error:
// z += 2; //~ ERROR cannot assign twice to immutable variable `z`
// You must declare a variable mutable explicitly:
let mut x = 3;
// Similarly, references are immutable by default e.g.
// The following lines would raise a compiler error. Even though x is mutable, y is an
// immutable reference.
// let y = &x;
// *y += 2; //~ ERROR cannot borrow `x` as mutable because it is also borrowed as immutable
let y = &mut x;
*y += 2; // Works
// Note that though y is now a mutable reference, y itself is still immutable e.g.
// let mut z = 5; | identifier_body |
ordered.rs | use std::fmt;
use std::collections::VecMap;
use std::collections::vec_map::Entry;
use std::sync::atomic::{AtomicUsize,Ordering};
#[allow(non_camel_case_types)]
#[derive(Copy)]
pub enum Category
{ TOKEN = 0,
RULE = 1,
STATE = 2 }
pub trait Ordered {
fn get_order(&self) -> usize;
}
pub struct Manager {
next_order: VecMap<AtomicUsize>
}
impl Manager {
pub fn new() -> Self {
Manager{next_order: VecMap::with_capacity(3)}
}
pub fn | (&mut self, c: Category) -> usize
{
match self.next_order.entry(c as usize) {
Entry::Vacant(entry) => { entry.insert(AtomicUsize::new(1));
0 },
Entry::Occupied(mut entry) => { let val = entry.get_mut();
val.fetch_add(1, Ordering::SeqCst) }
}
}
}
impl fmt::Debug for Manager {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ordered::Manager{{}}")
}
}
| next_order | identifier_name |
ordered.rs | use std::fmt;
use std::collections::VecMap;
use std::collections::vec_map::Entry;
use std::sync::atomic::{AtomicUsize,Ordering};
#[allow(non_camel_case_types)]
#[derive(Copy)]
pub enum Category
{ TOKEN = 0,
RULE = 1,
STATE = 2 }
pub trait Ordered {
fn get_order(&self) -> usize;
}
pub struct Manager {
next_order: VecMap<AtomicUsize>
}
impl Manager {
pub fn new() -> Self {
Manager{next_order: VecMap::with_capacity(3)}
}
pub fn next_order(&mut self, c: Category) -> usize
{
match self.next_order.entry(c as usize) {
Entry::Vacant(entry) => { entry.insert(AtomicUsize::new(1));
0 },
Entry::Occupied(mut entry) => { let val = entry.get_mut();
val.fetch_add(1, Ordering::SeqCst) }
}
}
}
impl fmt::Debug for Manager {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ordered::Manager{{}}")
} | } | random_line_split |
|
ordered.rs | use std::fmt;
use std::collections::VecMap;
use std::collections::vec_map::Entry;
use std::sync::atomic::{AtomicUsize,Ordering};
#[allow(non_camel_case_types)]
#[derive(Copy)]
pub enum Category
{ TOKEN = 0,
RULE = 1,
STATE = 2 }
pub trait Ordered {
fn get_order(&self) -> usize;
}
pub struct Manager {
next_order: VecMap<AtomicUsize>
}
impl Manager {
pub fn new() -> Self {
Manager{next_order: VecMap::with_capacity(3)}
}
pub fn next_order(&mut self, c: Category) -> usize
|
}
impl fmt::Debug for Manager {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ordered::Manager{{}}")
}
}
| {
match self.next_order.entry(c as usize) {
Entry::Vacant(entry) => { entry.insert(AtomicUsize::new(1));
0 },
Entry::Occupied(mut entry) => { let val = entry.get_mut();
val.fetch_add(1, Ordering::SeqCst) }
}
} | identifier_body |
ordered.rs | use std::fmt;
use std::collections::VecMap;
use std::collections::vec_map::Entry;
use std::sync::atomic::{AtomicUsize,Ordering};
#[allow(non_camel_case_types)]
#[derive(Copy)]
pub enum Category
{ TOKEN = 0,
RULE = 1,
STATE = 2 }
pub trait Ordered {
fn get_order(&self) -> usize;
}
pub struct Manager {
next_order: VecMap<AtomicUsize>
}
impl Manager {
pub fn new() -> Self {
Manager{next_order: VecMap::with_capacity(3)}
}
pub fn next_order(&mut self, c: Category) -> usize
{
match self.next_order.entry(c as usize) {
Entry::Vacant(entry) => | ,
Entry::Occupied(mut entry) => { let val = entry.get_mut();
val.fetch_add(1, Ordering::SeqCst) }
}
}
}
impl fmt::Debug for Manager {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ordered::Manager{{}}")
}
}
| { entry.insert(AtomicUsize::new(1));
0 } | conditional_block |
conditional-compile.rs | // xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Crate use statements
#[cfg(bogus)]
use flippity;
#[cfg(bogus)]
static b: bool = false;
static b: bool = true;
mod rustrt {
#[cfg(bogus)]
extern {
// This symbol doesn't exist and would be a link error if this
// module was translated
pub fn bogus();
}
extern {}
}
#[cfg(bogus)]
type t = int;
type t = bool;
#[cfg(bogus)]
enum tg { foo, }
enum tg { bar, }
#[cfg(bogus)]
struct r {
i: int,
}
#[cfg(bogus)]
fn r(i:int) -> r {
r {
i: i
}
}
struct | {
i: int,
}
fn r(i:int) -> r {
r {
i: i
}
}
#[cfg(bogus)]
mod m {
// This needs to parse but would fail in typeck. Since it's not in
// the current config it should not be typechecked.
pub fn bogus() { return 0; }
}
mod m {
// Submodules have slightly different code paths than the top-level
// module, so let's make sure this jazz works here as well
#[cfg(bogus)]
pub fn f() { }
pub fn f() { }
}
// Since the bogus configuration isn't defined main will just be
// parsed, but nothing further will be done with it
#[cfg(bogus)]
pub fn main() { fail!() }
pub fn main() {
// Exercise some of the configured items in ways that wouldn't be possible
// if they had the bogus definition
assert!((b));
let _x: t = true;
let _y: tg = bar;
test_in_fn_ctxt();
}
fn test_in_fn_ctxt() {
#[cfg(bogus)]
fn f() { fail!() }
fn f() { }
f();
#[cfg(bogus)]
static i: int = 0;
static i: int = 1;
assert_eq!(i, 1);
}
mod test_foreign_items {
pub mod rustrt {
extern {
#[cfg(bogus)]
pub fn write() -> ~str;
pub fn write() -> ~str;
}
}
}
mod test_use_statements {
#[cfg(bogus)]
use flippity_foo;
}
mod test_methods {
struct Foo {
bar: uint
}
impl Fooable for Foo {
#[cfg(bogus)]
fn what(&self) { }
fn what(&self) { }
#[cfg(bogus)]
fn the(&self) { }
fn the(&self) { }
}
trait Fooable {
#[cfg(bogus)]
fn what(&self);
fn what(&self);
#[cfg(bogus)]
fn the(&self);
fn the(&self);
}
}
| r | identifier_name |
conditional-compile.rs | // xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Crate use statements
#[cfg(bogus)]
use flippity;
#[cfg(bogus)]
static b: bool = false;
static b: bool = true;
mod rustrt {
#[cfg(bogus)]
extern {
// This symbol doesn't exist and would be a link error if this
// module was translated
pub fn bogus();
}
extern {}
}
#[cfg(bogus)]
type t = int;
type t = bool;
#[cfg(bogus)]
enum tg { foo, }
enum tg { bar, }
#[cfg(bogus)]
struct r {
i: int,
}
#[cfg(bogus)]
fn r(i:int) -> r {
r {
i: i
}
}
struct r {
i: int,
}
fn r(i:int) -> r {
r {
i: i
}
}
#[cfg(bogus)]
mod m {
// This needs to parse but would fail in typeck. Since it's not in
// the current config it should not be typechecked.
pub fn bogus() { return 0; }
}
mod m {
// Submodules have slightly different code paths than the top-level
// module, so let's make sure this jazz works here as well
#[cfg(bogus)]
pub fn f() { }
pub fn f() { }
}
// Since the bogus configuration isn't defined main will just be
// parsed, but nothing further will be done with it
#[cfg(bogus)]
pub fn main() { fail!() }
pub fn main() {
// Exercise some of the configured items in ways that wouldn't be possible
// if they had the bogus definition
assert!((b));
let _x: t = true;
let _y: tg = bar;
test_in_fn_ctxt();
}
fn test_in_fn_ctxt() {
#[cfg(bogus)]
fn f() { fail!() }
fn f() { }
f();
#[cfg(bogus)]
static i: int = 0;
static i: int = 1;
assert_eq!(i, 1);
}
mod test_foreign_items {
pub mod rustrt {
extern {
#[cfg(bogus)]
pub fn write() -> ~str;
pub fn write() -> ~str;
}
}
}
mod test_use_statements {
#[cfg(bogus)]
use flippity_foo;
}
| impl Fooable for Foo {
#[cfg(bogus)]
fn what(&self) { }
fn what(&self) { }
#[cfg(bogus)]
fn the(&self) { }
fn the(&self) { }
}
trait Fooable {
#[cfg(bogus)]
fn what(&self);
fn what(&self);
#[cfg(bogus)]
fn the(&self);
fn the(&self);
}
} | mod test_methods {
struct Foo {
bar: uint
}
| random_line_split |
conditional-compile.rs | // xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Crate use statements
#[cfg(bogus)]
use flippity;
#[cfg(bogus)]
static b: bool = false;
static b: bool = true;
mod rustrt {
#[cfg(bogus)]
extern {
// This symbol doesn't exist and would be a link error if this
// module was translated
pub fn bogus();
}
extern {}
}
#[cfg(bogus)]
type t = int;
type t = bool;
#[cfg(bogus)]
enum tg { foo, }
enum tg { bar, }
#[cfg(bogus)]
struct r {
i: int,
}
#[cfg(bogus)]
fn r(i:int) -> r {
r {
i: i
}
}
struct r {
i: int,
}
fn r(i:int) -> r {
r {
i: i
}
}
#[cfg(bogus)]
mod m {
// This needs to parse but would fail in typeck. Since it's not in
// the current config it should not be typechecked.
pub fn bogus() { return 0; }
}
mod m {
// Submodules have slightly different code paths than the top-level
// module, so let's make sure this jazz works here as well
#[cfg(bogus)]
pub fn f() { }
pub fn f() { }
}
// Since the bogus configuration isn't defined main will just be
// parsed, but nothing further will be done with it
#[cfg(bogus)]
pub fn main() { fail!() }
pub fn main() {
// Exercise some of the configured items in ways that wouldn't be possible
// if they had the bogus definition
assert!((b));
let _x: t = true;
let _y: tg = bar;
test_in_fn_ctxt();
}
fn test_in_fn_ctxt() {
#[cfg(bogus)]
fn f() { fail!() }
fn f() { }
f();
#[cfg(bogus)]
static i: int = 0;
static i: int = 1;
assert_eq!(i, 1);
}
mod test_foreign_items {
pub mod rustrt {
extern {
#[cfg(bogus)]
pub fn write() -> ~str;
pub fn write() -> ~str;
}
}
}
mod test_use_statements {
#[cfg(bogus)]
use flippity_foo;
}
mod test_methods {
struct Foo {
bar: uint
}
impl Fooable for Foo {
#[cfg(bogus)]
fn what(&self) { }
fn what(&self) |
#[cfg(bogus)]
fn the(&self) { }
fn the(&self) { }
}
trait Fooable {
#[cfg(bogus)]
fn what(&self);
fn what(&self);
#[cfg(bogus)]
fn the(&self);
fn the(&self);
}
}
| { } | identifier_body |
unused_async.rs | use clippy_utils::diagnostics::span_lint_and_help;
use rustc_hir::intravisit::{walk_expr, walk_fn, FnKind, NestedVisitorMap, Visitor};
use rustc_hir::{Body, Expr, ExprKind, FnDecl, FnHeader, HirId, IsAsync, YieldSource};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::Span;
declare_clippy_lint! {
/// ### What it does
/// Checks for functions that are declared `async` but have no `.await`s inside of them.
///
/// ### Why is this bad?
/// Async functions with no async code create overhead, both mentally and computationally.
/// Callers of async methods either need to be calling from an async function themselves or run it on an executor, both of which
    /// cause runtime overhead and hassle for the caller.
///
/// ### Example
/// ```rust
/// // Bad
/// async fn get_random_number() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = get_random_number();
///
/// // Good
/// fn get_random_number_improved() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = async { get_random_number_improved() };
/// ```
pub UNUSED_ASYNC,
pedantic,
"finds async functions with no await statements"
}
declare_lint_pass!(UnusedAsync => [UNUSED_ASYNC]);
struct AsyncFnVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
found_await: bool,
}
impl<'a, 'tcx> Visitor<'tcx> for AsyncFnVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
        if let ExprKind::Yield(_, YieldSource::Await { .. }) = ex.kind {
self.found_await = true;
}
walk_expr(self, ex);
}
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
impl<'tcx> LateLintPass<'tcx> for UnusedAsync {
fn | (
&mut self,
cx: &LateContext<'tcx>,
fn_kind: FnKind<'tcx>,
fn_decl: &'tcx FnDecl<'tcx>,
body: &Body<'tcx>,
span: Span,
hir_id: HirId,
) {
        if let FnKind::ItemFn(_, _, FnHeader { asyncness, .. }, _) = &fn_kind {
if matches!(asyncness, IsAsync::Async) {
let mut visitor = AsyncFnVisitor { cx, found_await: false };
walk_fn(&mut visitor, fn_kind, fn_decl, body.id(), span, hir_id);
                if !visitor.found_await {
span_lint_and_help(
cx,
UNUSED_ASYNC,
span,
"unused `async` for function with no await statements",
None,
"consider removing the `async` from this function",
);
}
}
}
}
}
| check_fn | identifier_name |
unused_async.rs | use clippy_utils::diagnostics::span_lint_and_help;
use rustc_hir::intravisit::{walk_expr, walk_fn, FnKind, NestedVisitorMap, Visitor};
use rustc_hir::{Body, Expr, ExprKind, FnDecl, FnHeader, HirId, IsAsync, YieldSource};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::Span;
declare_clippy_lint! {
/// ### What it does
/// Checks for functions that are declared `async` but have no `.await`s inside of them.
///
/// ### Why is this bad?
/// Async functions with no async code create overhead, both mentally and computationally.
/// Callers of async methods either need to be calling from an async function themselves or run it on an executor, both of which
    /// cause runtime overhead and hassle for the caller.
///
/// ### Example
/// ```rust
/// // Bad
/// async fn get_random_number() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = get_random_number();
///
/// // Good
/// fn get_random_number_improved() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = async { get_random_number_improved() };
/// ```
pub UNUSED_ASYNC,
pedantic,
"finds async functions with no await statements"
}
declare_lint_pass!(UnusedAsync => [UNUSED_ASYNC]);
struct AsyncFnVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
found_await: bool,
}
impl<'a, 'tcx> Visitor<'tcx> for AsyncFnVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
        if let ExprKind::Yield(_, YieldSource::Await { .. }) = ex.kind {
self.found_await = true;
}
walk_expr(self, ex);
}
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
impl<'tcx> LateLintPass<'tcx> for UnusedAsync {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
fn_kind: FnKind<'tcx>,
fn_decl: &'tcx FnDecl<'tcx>,
body: &Body<'tcx>,
span: Span,
hir_id: HirId,
) {
        if let FnKind::ItemFn(_, _, FnHeader { asyncness, .. }, _) = &fn_kind {
if matches!(asyncness, IsAsync::Async) {
let mut visitor = AsyncFnVisitor { cx, found_await: false };
walk_fn(&mut visitor, fn_kind, fn_decl, body.id(), span, hir_id);
                if !visitor.found_await {
span_lint_and_help(
cx,
UNUSED_ASYNC,
span,
"unused `async` for function with no await statements",
None,
"consider removing the `async` from this function",
);
}
}
}
} | } | random_line_split |
|
unused_async.rs | use clippy_utils::diagnostics::span_lint_and_help;
use rustc_hir::intravisit::{walk_expr, walk_fn, FnKind, NestedVisitorMap, Visitor};
use rustc_hir::{Body, Expr, ExprKind, FnDecl, FnHeader, HirId, IsAsync, YieldSource};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::Span;
declare_clippy_lint! {
/// ### What it does
/// Checks for functions that are declared `async` but have no `.await`s inside of them.
///
/// ### Why is this bad?
/// Async functions with no async code create overhead, both mentally and computationally.
/// Callers of async methods either need to be calling from an async function themselves or run it on an executor, both of which
    /// cause runtime overhead and hassle for the caller.
///
/// ### Example
/// ```rust
/// // Bad
/// async fn get_random_number() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = get_random_number();
///
/// // Good
/// fn get_random_number_improved() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = async { get_random_number_improved() };
/// ```
pub UNUSED_ASYNC,
pedantic,
"finds async functions with no await statements"
}
declare_lint_pass!(UnusedAsync => [UNUSED_ASYNC]);
struct AsyncFnVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
found_await: bool,
}
impl<'a, 'tcx> Visitor<'tcx> for AsyncFnVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
        if let ExprKind::Yield(_, YieldSource::Await { .. }) = ex.kind {
self.found_await = true;
}
walk_expr(self, ex);
}
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
impl<'tcx> LateLintPass<'tcx> for UnusedAsync {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
fn_kind: FnKind<'tcx>,
fn_decl: &'tcx FnDecl<'tcx>,
body: &Body<'tcx>,
span: Span,
hir_id: HirId,
) {
        if let FnKind::ItemFn(_, _, FnHeader { asyncness, .. }, _) = &fn_kind |
}
}
| {
if matches!(asyncness, IsAsync::Async) {
let mut visitor = AsyncFnVisitor { cx, found_await: false };
walk_fn(&mut visitor, fn_kind, fn_decl, body.id(), span, hir_id);
if !visitor.found_await {
span_lint_and_help(
cx,
UNUSED_ASYNC,
span,
"unused `async` for function with no await statements",
None,
"consider removing the `async` from this function",
);
}
}
} | conditional_block |
unused_async.rs | use clippy_utils::diagnostics::span_lint_and_help;
use rustc_hir::intravisit::{walk_expr, walk_fn, FnKind, NestedVisitorMap, Visitor};
use rustc_hir::{Body, Expr, ExprKind, FnDecl, FnHeader, HirId, IsAsync, YieldSource};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::Span;
declare_clippy_lint! {
/// ### What it does
/// Checks for functions that are declared `async` but have no `.await`s inside of them.
///
/// ### Why is this bad?
/// Async functions with no async code create overhead, both mentally and computationally.
/// Callers of async methods either need to be calling from an async function themselves or run it on an executor, both of which
    /// cause runtime overhead and hassle for the caller.
///
/// ### Example
/// ```rust
/// // Bad
/// async fn get_random_number() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = get_random_number();
///
/// // Good
/// fn get_random_number_improved() -> i64 {
/// 4 // Chosen by fair dice roll. Guaranteed to be random.
/// }
/// let number_future = async { get_random_number_improved() };
/// ```
pub UNUSED_ASYNC,
pedantic,
"finds async functions with no await statements"
}
declare_lint_pass!(UnusedAsync => [UNUSED_ASYNC]);
struct AsyncFnVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
found_await: bool,
}
impl<'a, 'tcx> Visitor<'tcx> for AsyncFnVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
        if let ExprKind::Yield(_, YieldSource::Await { .. }) = ex.kind {
self.found_await = true;
}
walk_expr(self, ex);
}
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
impl<'tcx> LateLintPass<'tcx> for UnusedAsync {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
fn_kind: FnKind<'tcx>,
fn_decl: &'tcx FnDecl<'tcx>,
body: &Body<'tcx>,
span: Span,
hir_id: HirId,
) |
}
| {
if let FnKind::ItemFn(_, _, FnHeader { asyncness, .. }, _) = &fn_kind {
if matches!(asyncness, IsAsync::Async) {
let mut visitor = AsyncFnVisitor { cx, found_await: false };
walk_fn(&mut visitor, fn_kind, fn_decl, body.id(), span, hir_id);
if !visitor.found_await {
span_lint_and_help(
cx,
UNUSED_ASYNC,
span,
"unused `async` for function with no await statements",
None,
"consider removing the `async` from this function",
);
}
}
}
} | identifier_body |
simpleapi.rs | // Simple API example, ported from http://lua-users.org/wiki/SimpleLuaApiExample
// This is a simple introductory example of how to interface to Lua from Rust.
// The Rust program loads a Lua script file, sets some Lua variables, runs the
// Lua script, and reads back the return value.
| #![allow(non_snake_case)]
extern crate lua;
use std::io::{self, Write};
use std::path::Path;
use std::process;
fn main() {
let mut L = lua::State::new();
L.openlibs(); // Load Lua libraries
// Load the file containing the script we are going to run
let path = Path::new("simpleapi.lua");
match L.loadfile(Some(&path)) {
Ok(_) => (),
Err(_) => {
// If something went wrong, error message is at the top of the stack
let _ = writeln!(&mut io::stderr(),
"Couldn't load file: {}", L.describe(-1));
process::exit(1);
}
}
/*
* Ok, now here we go: We pass data to the lua script on the stack.
* That is, we first have to prepare Lua's virtual stack the way we
* want the script to receive it, then ask Lua to run it.
*/
L.newtable(); // We will pass a table
/*
* To put values into the table, we first push the index, then the
* value, and then call rawset() with the index of the table in the
* stack. Let's see why it's -3: In Lua, the value -1 always refers to
* the top of the stack. When you create the table with newtable(),
* the table gets pushed into the top of the stack. When you push the
* index and then the cell value, the stack looks like:
*
* - [stack bottom] -- table, index, value [top]
*
* So the -1 will refer to the cell value, thus -3 is used to refer to
* the table itself. Note that rawset() pops the last two elements
* of the stack, so that after it has been called, the table is at the
* top of the stack.
*/
for i in 1..6 {
L.pushinteger(i); // Push the table index
L.pushinteger(i*2); // Push the cell value
L.rawset(-3); // Stores the pair in the table
}
// By what name is the script going to reference our table?
L.setglobal("foo");
// Ask Lua to run our little script
match L.pcall(0, lua::MULTRET, 0) {
Ok(()) => (),
Err(_) => {
let _ = writeln!(&mut io::stderr(),
"Failed to run script: {}", L.describe(-1));
process::exit(1);
}
}
    // Get the returned value at the top of the stack (index -1)
let sum = L.tonumber(-1);
println!("Script returned: {}", sum);
L.pop(1); // Take the returned value out of the stack
// L's destructor will close the state for us
} | random_line_split |
|
simpleapi.rs | // Simple API example, ported from http://lua-users.org/wiki/SimpleLuaApiExample
// This is a simple introductory example of how to interface to Lua from Rust.
// The Rust program loads a Lua script file, sets some Lua variables, runs the
// Lua script, and reads back the return value.
#![allow(non_snake_case)]
extern crate lua;
use std::io::{self, Write};
use std::path::Path;
use std::process;
fn main() | */
L.newtable(); // We will pass a table
/*
* To put values into the table, we first push the index, then the
* value, and then call rawset() with the index of the table in the
* stack. Let's see why it's -3: In Lua, the value -1 always refers to
* the top of the stack. When you create the table with newtable(),
* the table gets pushed into the top of the stack. When you push the
* index and then the cell value, the stack looks like:
*
* - [stack bottom] -- table, index, value [top]
*
* So the -1 will refer to the cell value, thus -3 is used to refer to
* the table itself. Note that rawset() pops the last two elements
* of the stack, so that after it has been called, the table is at the
* top of the stack.
*/
for i in 1..6 {
L.pushinteger(i); // Push the table index
L.pushinteger(i*2); // Push the cell value
L.rawset(-3); // Stores the pair in the table
}
// By what name is the script going to reference our table?
L.setglobal("foo");
// Ask Lua to run our little script
match L.pcall(0, lua::MULTRET, 0) {
Ok(()) => (),
Err(_) => {
let _ = writeln!(&mut io::stderr(),
"Failed to run script: {}", L.describe(-1));
process::exit(1);
}
}
    // Get the returned value at the top of the stack (index -1)
let sum = L.tonumber(-1);
println!("Script returned: {}", sum);
L.pop(1); // Take the returned value out of the stack
// L's destructor will close the state for us
}
| {
let mut L = lua::State::new();
L.openlibs(); // Load Lua libraries
// Load the file containing the script we are going to run
let path = Path::new("simpleapi.lua");
match L.loadfile(Some(&path)) {
Ok(_) => (),
Err(_) => {
// If something went wrong, error message is at the top of the stack
let _ = writeln!(&mut io::stderr(),
"Couldn't load file: {}", L.describe(-1));
process::exit(1);
}
}
/*
* Ok, now here we go: We pass data to the lua script on the stack.
* That is, we first have to prepare Lua's virtual stack the way we
* want the script to receive it, then ask Lua to run it. | identifier_body |
simpleapi.rs | // Simple API example, ported from http://lua-users.org/wiki/SimpleLuaApiExample
// This is a simple introductory example of how to interface to Lua from Rust.
// The Rust program loads a Lua script file, sets some Lua variables, runs the
// Lua script, and reads back the return value.
#![allow(non_snake_case)]
extern crate lua;
use std::io::{self, Write};
use std::path::Path;
use std::process;
fn | () {
let mut L = lua::State::new();
L.openlibs(); // Load Lua libraries
// Load the file containing the script we are going to run
let path = Path::new("simpleapi.lua");
match L.loadfile(Some(&path)) {
Ok(_) => (),
Err(_) => {
// If something went wrong, error message is at the top of the stack
let _ = writeln!(&mut io::stderr(),
"Couldn't load file: {}", L.describe(-1));
process::exit(1);
}
}
/*
* Ok, now here we go: We pass data to the lua script on the stack.
* That is, we first have to prepare Lua's virtual stack the way we
* want the script to receive it, then ask Lua to run it.
*/
L.newtable(); // We will pass a table
/*
* To put values into the table, we first push the index, then the
* value, and then call rawset() with the index of the table in the
* stack. Let's see why it's -3: In Lua, the value -1 always refers to
* the top of the stack. When you create the table with newtable(),
* the table gets pushed into the top of the stack. When you push the
* index and then the cell value, the stack looks like:
*
* - [stack bottom] -- table, index, value [top]
*
* So the -1 will refer to the cell value, thus -3 is used to refer to
* the table itself. Note that rawset() pops the last two elements
* of the stack, so that after it has been called, the table is at the
* top of the stack.
*/
for i in 1..6 {
L.pushinteger(i); // Push the table index
L.pushinteger(i*2); // Push the cell value
L.rawset(-3); // Stores the pair in the table
}
// By what name is the script going to reference our table?
L.setglobal("foo");
// Ask Lua to run our little script
match L.pcall(0, lua::MULTRET, 0) {
Ok(()) => (),
Err(_) => {
let _ = writeln!(&mut io::stderr(),
"Failed to run script: {}", L.describe(-1));
process::exit(1);
}
}
    // Get the returned value at the top of the stack (index -1)
let sum = L.tonumber(-1);
println!("Script returned: {}", sum);
L.pop(1); // Take the returned value out of the stack
// L's destructor will close the state for us
}
| main | identifier_name |
route-list.rs | extern crate neli;
use std::error::Error;
use std::net::IpAddr;
use neli::consts::*;
use neli::err::NlError;
use neli::nl::Nlmsghdr;
use neli::rtnl::*;
use neli::socket::*;
fn parse_route_table(rtm: Nlmsghdr<Rtm, Rtmsg>) | Rta::Dst => dst = to_addr(&attr.rta_payload),
Rta::Prefsrc => src = to_addr(&attr.rta_payload),
Rta::Gateway => gateway = to_addr(&attr.rta_payload),
_ => (),
}
}
if let Some(dst) = dst {
print!("{}/{} ", dst, rtm.nl_payload.rtm_dst_len);
} else {
print!("default ");
if let Some(gateway) = gateway {
print!("via {} ", gateway);
}
}
        if rtm.nl_payload.rtm_scope != RtScope::Universe {
print!(
" proto {:?} scope {:?} ",
rtm.nl_payload.rtm_protocol, rtm.nl_payload.rtm_scope
)
}
if let Some(src) = src {
print!(" src {} ", src);
}
println!();
}
}
/// This sample is a simple imitation of the `ip route` command, to demonstrate interaction
/// with the rtnetlink subsystem.
fn main() -> Result<(), Box<dyn Error>> {
let mut socket = NlSocket::connect(NlFamily::Route, None, None, true).unwrap();
let rtmsg = Rtmsg {
rtm_family: RtAddrFamily::Inet,
rtm_dst_len: 0,
rtm_src_len: 0,
rtm_tos: 0,
rtm_table: RtTable::Unspec,
rtm_protocol: Rtprot::Unspec,
rtm_scope: RtScope::Universe,
rtm_type: Rtn::Unspec,
rtm_flags: vec![],
rtattrs: Rtattrs::empty(),
};
let nlhdr = {
let len = None;
let nl_type = Rtm::Getroute;
let flags = vec![NlmF::Request, NlmF::Dump];
let seq = None;
let pid = None;
let payload = rtmsg;
Nlmsghdr::new(len, nl_type, flags, seq, pid, payload)
};
socket.send_nl(nlhdr).unwrap();
// Provisionally deserialize as a Nlmsg first.
let nl = socket.recv_nl::<Rtm, Rtmsg>(None)?;
let multi_msg = nl.nl_flags.contains(&NlmF::Multi);
parse_route_table(nl);
if multi_msg {
while let Ok(nl) = socket.recv_nl::<u16, Rtmsg>(None) {
match Nlmsg::from(nl.nl_type) {
Nlmsg::Done => return Ok(()),
Nlmsg::Error => return Err(Box::new(NlError::new("rtnetlink error."))),
_ => {
let rtm = Nlmsghdr {
nl_len: nl.nl_len,
nl_type: Rtm::from(nl.nl_type),
nl_flags: nl.nl_flags,
nl_seq: nl.nl_seq,
nl_pid: nl.nl_pid,
nl_payload: nl.nl_payload,
};
// Some other message type, so let's try to deserialize as a Rtm.
parse_route_table(rtm)
}
}
}
}
Ok(())
}
| {
// This sample is only interested in the main table.
if rtm.nl_payload.rtm_table == RtTable::Main {
let mut src = None;
let mut dst = None;
let mut gateway = None;
for attr in rtm.nl_payload.rtattrs.iter() {
fn to_addr(b: &[u8]) -> Option<IpAddr> {
use std::convert::TryFrom;
if let Ok(tup) = <&[u8; 4]>::try_from(b) {
Some(IpAddr::from(*tup))
} else if let Ok(tup) = <&[u8; 16]>::try_from(b) {
Some(IpAddr::from(*tup))
} else {
None
}
}
match attr.rta_type { | identifier_body |
route-list.rs | extern crate neli;
use std::error::Error;
use std::net::IpAddr;
use neli::consts::*;
use neli::err::NlError;
use neli::nl::Nlmsghdr;
use neli::rtnl::*;
use neli::socket::*;
fn parse_route_table(rtm: Nlmsghdr<Rtm, Rtmsg>) {
// This sample is only interested in the main table.
if rtm.nl_payload.rtm_table == RtTable::Main {
let mut src = None;
let mut dst = None;
let mut gateway = None;
for attr in rtm.nl_payload.rtattrs.iter() {
fn to_addr(b: &[u8]) -> Option<IpAddr> {
use std::convert::TryFrom;
if let Ok(tup) = <&[u8; 4]>::try_from(b) {
Some(IpAddr::from(*tup))
} else if let Ok(tup) = <&[u8; 16]>::try_from(b) {
Some(IpAddr::from(*tup))
} else {
None
}
}
match attr.rta_type {
Rta::Dst => dst = to_addr(&attr.rta_payload),
Rta::Prefsrc => src = to_addr(&attr.rta_payload),
Rta::Gateway => gateway = to_addr(&attr.rta_payload),
_ => (),
}
}
if let Some(dst) = dst {
print!("{}/{} ", dst, rtm.nl_payload.rtm_dst_len);
} else {
print!("default ");
if let Some(gateway) = gateway {
print!("via {} ", gateway);
}
}
        if rtm.nl_payload.rtm_scope != RtScope::Universe {
print!(
" proto {:?} scope {:?} ",
rtm.nl_payload.rtm_protocol, rtm.nl_payload.rtm_scope
)
}
if let Some(src) = src {
print!(" src {} ", src);
}
println!();
}
}
/// This sample is a simple imitation of the `ip route` command, to demonstrate interaction
/// with the rtnetlink subsystem.
fn | () -> Result<(), Box<dyn Error>> {
let mut socket = NlSocket::connect(NlFamily::Route, None, None, true).unwrap();
let rtmsg = Rtmsg {
rtm_family: RtAddrFamily::Inet,
rtm_dst_len: 0,
rtm_src_len: 0,
rtm_tos: 0,
rtm_table: RtTable::Unspec,
rtm_protocol: Rtprot::Unspec,
rtm_scope: RtScope::Universe,
rtm_type: Rtn::Unspec,
rtm_flags: vec![],
rtattrs: Rtattrs::empty(),
};
let nlhdr = {
let len = None;
let nl_type = Rtm::Getroute;
let flags = vec![NlmF::Request, NlmF::Dump];
let seq = None;
let pid = None;
let payload = rtmsg;
Nlmsghdr::new(len, nl_type, flags, seq, pid, payload)
};
socket.send_nl(nlhdr).unwrap();
// Provisionally deserialize as a Nlmsg first.
let nl = socket.recv_nl::<Rtm, Rtmsg>(None)?;
let multi_msg = nl.nl_flags.contains(&NlmF::Multi);
parse_route_table(nl);
if multi_msg {
while let Ok(nl) = socket.recv_nl::<u16, Rtmsg>(None) {
match Nlmsg::from(nl.nl_type) {
Nlmsg::Done => return Ok(()),
Nlmsg::Error => return Err(Box::new(NlError::new("rtnetlink error."))),
_ => {
let rtm = Nlmsghdr {
nl_len: nl.nl_len,
nl_type: Rtm::from(nl.nl_type),
nl_flags: nl.nl_flags,
nl_seq: nl.nl_seq,
nl_pid: nl.nl_pid,
nl_payload: nl.nl_payload,
};
// Some other message type, so let's try to deserialize as a Rtm.
parse_route_table(rtm)
}
}
}
}
Ok(())
}
| main | identifier_name |
route-list.rs | extern crate neli;
use std::error::Error;
use std::net::IpAddr;
use neli::consts::*;
use neli::err::NlError;
use neli::nl::Nlmsghdr;
use neli::rtnl::*;
use neli::socket::*;
fn parse_route_table(rtm: Nlmsghdr<Rtm, Rtmsg>) {
// This sample is only interested in the main table.
if rtm.nl_payload.rtm_table == RtTable::Main {
let mut src = None;
let mut dst = None;
let mut gateway = None;
for attr in rtm.nl_payload.rtattrs.iter() {
fn to_addr(b: &[u8]) -> Option<IpAddr> {
use std::convert::TryFrom;
if let Ok(tup) = <&[u8; 4]>::try_from(b) {
Some(IpAddr::from(*tup))
} else if let Ok(tup) = <&[u8; 16]>::try_from(b) {
Some(IpAddr::from(*tup))
} else {
None
}
}
match attr.rta_type {
Rta::Dst => dst = to_addr(&attr.rta_payload),
Rta::Prefsrc => src = to_addr(&attr.rta_payload),
Rta::Gateway => gateway = to_addr(&attr.rta_payload),
_ => (),
}
}
if let Some(dst) = dst {
print!("{}/{} ", dst, rtm.nl_payload.rtm_dst_len);
} else {
print!("default ");
if let Some(gateway) = gateway {
print!("via {} ", gateway);
}
}
        if rtm.nl_payload.rtm_scope != RtScope::Universe {
print!(
" proto {:?} scope {:?} ",
rtm.nl_payload.rtm_protocol, rtm.nl_payload.rtm_scope
)
}
if let Some(src) = src {
print!(" src {} ", src);
}
println!();
}
}
/// This sample is a simple imitation of the `ip route` command, to demonstrate interaction
/// with the rtnetlink subsystem.
fn main() -> Result<(), Box<dyn Error>> {
let mut socket = NlSocket::connect(NlFamily::Route, None, None, true).unwrap();
let rtmsg = Rtmsg {
rtm_family: RtAddrFamily::Inet,
rtm_dst_len: 0,
rtm_src_len: 0,
rtm_tos: 0,
rtm_table: RtTable::Unspec,
rtm_protocol: Rtprot::Unspec,
rtm_scope: RtScope::Universe,
rtm_type: Rtn::Unspec,
rtm_flags: vec![],
rtattrs: Rtattrs::empty(),
};
let nlhdr = {
let len = None;
let nl_type = Rtm::Getroute;
let flags = vec![NlmF::Request, NlmF::Dump];
let seq = None;
let pid = None;
let payload = rtmsg;
Nlmsghdr::new(len, nl_type, flags, seq, pid, payload)
};
socket.send_nl(nlhdr).unwrap();
// Provisionally deserialize as a Nlmsg first.
let nl = socket.recv_nl::<Rtm, Rtmsg>(None)?;
let multi_msg = nl.nl_flags.contains(&NlmF::Multi);
parse_route_table(nl);
if multi_msg {
while let Ok(nl) = socket.recv_nl::<u16, Rtmsg>(None) {
match Nlmsg::from(nl.nl_type) {
Nlmsg::Done => return Ok(()),
Nlmsg::Error => return Err(Box::new(NlError::new("rtnetlink error."))),
_ => {
let rtm = Nlmsghdr {
nl_len: nl.nl_len,
nl_type: Rtm::from(nl.nl_type),
nl_flags: nl.nl_flags,
nl_seq: nl.nl_seq,
nl_pid: nl.nl_pid, | }
}
}
}
Ok(())
} | nl_payload: nl.nl_payload,
};
// Some other message type, so let's try to deserialize as a Rtm.
parse_route_table(rtm) | random_line_split |
proto.rs |
use bytes::{BufMut, BytesMut};
use futures::Future;
use std::{io, str};
use std::net::SocketAddr;
use tokio_core::net::TcpStream;
use tokio_core::reactor::Handle;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::codec::{Decoder, Encoder, Framed};
use tokio_proto::TcpClient;
use tokio_proto::multiplex::{ClientProto, ClientService, RequestId};
use tokio_service::Service;
use unterflow_protocol::TransportMessage;
use unterflow_protocol::frame::DataFrameHeader;
use unterflow_protocol::io::{FromBytes, HasBlockLength, ToBytes};
pub struct MultiplexedClient {
inner: ClientService<TcpStream, MultiplexedProto>,
}
impl MultiplexedClient {
pub fn connect(
addr: &SocketAddr,
handle: &Handle,
) -> Box<Future<Item = MultiplexedClient, Error = io::Error>> {
Box::new(TcpClient::new(MultiplexedProto).connect(addr, handle).map(
|service| MultiplexedClient { inner: service },
))
}
}
impl Service for MultiplexedClient {
type Request = TransportMessage;
type Response = TransportMessage;
type Error = io::Error;
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future {
Box::new(self.inner.call(request))
}
}
struct MultiplexedCodec {
header: Option<DataFrameHeader>,
}
impl MultiplexedCodec {
fn decode_frame(
&mut self,
header: DataFrameHeader,
buffer: &mut BytesMut,
) -> Result<Option<(RequestId, TransportMessage)>, io::Error> {
let frame_length = header.aligned_length() - DataFrameHeader::block_length() as usize;
if buffer.len() < frame_length {
self.header = Some(header);
Ok(None)
} else {
let frame = buffer.split_to(frame_length);
let mut reader = io::Cursor::new(frame);
let frame = TransportMessage::read(header, &mut reader)?;
let request_id = match frame {
TransportMessage::RequestResponse(ref r) => r.request_header.request_id,
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
Ok(Some((request_id as RequestId, frame)))
}
}
}
impl Decoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if let Some(header) = self.header.take() {
self.decode_frame(header, buffer)
} else {
let header_length = DataFrameHeader::block_length() as usize;
if buffer.len() < header_length {
Ok(None)
} else {
let header = buffer.split_to(header_length);
let mut reader = io::Cursor::new(header);
let header = DataFrameHeader::from_bytes(&mut reader)?;
self.decode_frame(header, buffer)
}
}
}
}
impl Encoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn encode(&mut self, request: Self::Item, buffer: &mut BytesMut) -> Result<(), io::Error> {
let (request_id, mut request) = request;
match request {
TransportMessage::RequestResponse(ref mut r) => {
r.request_header.request_id = request_id;
}
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
let length = request.length();
if buffer.remaining_mut() < length {
buffer.reserve(length);
}
let mut writer = buffer.writer();
request.to_bytes(&mut writer)
}
}
struct MultiplexedProto;
impl<T: AsyncRead + AsyncWrite + 'static> ClientProto<T> for MultiplexedProto {
type Request = TransportMessage;
type Response = TransportMessage;
type Transport = Framed<T, MultiplexedCodec>;
type BindTransport = Result<Self::Transport, io::Error>;
fn bind_transport(&self, io: T) -> Self::BindTransport |
}
| {
Ok(io.framed(MultiplexedCodec { header: None }))
} | identifier_body |
proto.rs |
use bytes::{BufMut, BytesMut};
use futures::Future;
use std::{io, str};
use std::net::SocketAddr;
use tokio_core::net::TcpStream;
use tokio_core::reactor::Handle;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::codec::{Decoder, Encoder, Framed};
use tokio_proto::TcpClient;
use tokio_proto::multiplex::{ClientProto, ClientService, RequestId};
use tokio_service::Service;
use unterflow_protocol::TransportMessage;
use unterflow_protocol::frame::DataFrameHeader;
use unterflow_protocol::io::{FromBytes, HasBlockLength, ToBytes};
pub struct MultiplexedClient {
inner: ClientService<TcpStream, MultiplexedProto>,
}
impl MultiplexedClient {
pub fn connect(
addr: &SocketAddr,
handle: &Handle,
) -> Box<Future<Item = MultiplexedClient, Error = io::Error>> {
Box::new(TcpClient::new(MultiplexedProto).connect(addr, handle).map(
|service| MultiplexedClient { inner: service },
))
}
}
impl Service for MultiplexedClient {
type Request = TransportMessage;
type Response = TransportMessage;
type Error = io::Error;
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future {
Box::new(self.inner.call(request))
}
}
struct MultiplexedCodec {
header: Option<DataFrameHeader>,
}
impl MultiplexedCodec {
fn decode_frame(
&mut self,
header: DataFrameHeader,
buffer: &mut BytesMut,
) -> Result<Option<(RequestId, TransportMessage)>, io::Error> {
let frame_length = header.aligned_length() - DataFrameHeader::block_length() as usize;
if buffer.len() < frame_length {
self.header = Some(header);
Ok(None)
} else {
let frame = buffer.split_to(frame_length);
let mut reader = io::Cursor::new(frame);
let frame = TransportMessage::read(header, &mut reader)?;
let request_id = match frame {
TransportMessage::RequestResponse(ref r) => r.request_header.request_id,
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
Ok(Some((request_id as RequestId, frame)))
}
}
}
impl Decoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if let Some(header) = self.header.take() {
self.decode_frame(header, buffer)
} else {
let header_length = DataFrameHeader::block_length() as usize;
if buffer.len() < header_length | else {
let header = buffer.split_to(header_length);
let mut reader = io::Cursor::new(header);
let header = DataFrameHeader::from_bytes(&mut reader)?;
self.decode_frame(header, buffer)
}
}
}
}
impl Encoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn encode(&mut self, request: Self::Item, buffer: &mut BytesMut) -> Result<(), io::Error> {
let (request_id, mut request) = request;
match request {
TransportMessage::RequestResponse(ref mut r) => {
r.request_header.request_id = request_id;
}
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
let length = request.length();
if buffer.remaining_mut() < length {
buffer.reserve(length);
}
let mut writer = buffer.writer();
request.to_bytes(&mut writer)
}
}
struct MultiplexedProto;
impl<T: AsyncRead + AsyncWrite + 'static> ClientProto<T> for MultiplexedProto {
type Request = TransportMessage;
type Response = TransportMessage;
type Transport = Framed<T, MultiplexedCodec>;
type BindTransport = Result<Self::Transport, io::Error>;
fn bind_transport(&self, io: T) -> Self::BindTransport {
Ok(io.framed(MultiplexedCodec { header: None }))
}
}
| {
Ok(None)
} | conditional_block |
proto.rs | use bytes::{BufMut, BytesMut};
use futures::Future;
use std::{io, str};
use std::net::SocketAddr;
use tokio_core::net::TcpStream;
use tokio_core::reactor::Handle;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::codec::{Decoder, Encoder, Framed};
use tokio_proto::TcpClient;
use tokio_proto::multiplex::{ClientProto, ClientService, RequestId};
use tokio_service::Service;
use unterflow_protocol::TransportMessage;
use unterflow_protocol::frame::DataFrameHeader;
use unterflow_protocol::io::{FromBytes, HasBlockLength, ToBytes};
pub struct MultiplexedClient {
inner: ClientService<TcpStream, MultiplexedProto>,
}
impl MultiplexedClient {
pub fn connect(
addr: &SocketAddr,
handle: &Handle,
) -> Box<Future<Item = MultiplexedClient, Error = io::Error>> {
Box::new(TcpClient::new(MultiplexedProto).connect(addr, handle).map(
|service| MultiplexedClient { inner: service },
))
}
}
impl Service for MultiplexedClient {
type Request = TransportMessage;
type Response = TransportMessage;
type Error = io::Error;
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future {
Box::new(self.inner.call(request))
}
}
struct MultiplexedCodec {
header: Option<DataFrameHeader>,
}
impl MultiplexedCodec {
fn decode_frame(
&mut self,
header: DataFrameHeader,
buffer: &mut BytesMut,
) -> Result<Option<(RequestId, TransportMessage)>, io::Error> {
let frame_length = header.aligned_length() - DataFrameHeader::block_length() as usize;
if buffer.len() < frame_length {
self.header = Some(header);
Ok(None)
} else {
let frame = buffer.split_to(frame_length);
let mut reader = io::Cursor::new(frame);
let frame = TransportMessage::read(header, &mut reader)?;
let request_id = match frame {
TransportMessage::RequestResponse(ref r) => r.request_header.request_id,
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
Ok(Some((request_id as RequestId, frame)))
}
}
}
impl Decoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if let Some(header) = self.header.take() {
self.decode_frame(header, buffer)
} else {
let header_length = DataFrameHeader::block_length() as usize;
if buffer.len() < header_length {
Ok(None)
} else {
let header = buffer.split_to(header_length);
let mut reader = io::Cursor::new(header);
let header = DataFrameHeader::from_bytes(&mut reader)?;
self.decode_frame(header, buffer)
}
}
}
}
impl Encoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn encode(&mut self, request: Self::Item, buffer: &mut BytesMut) -> Result<(), io::Error> {
let (request_id, mut request) = request;
match request {
TransportMessage::RequestResponse(ref mut r) => {
r.request_header.request_id = request_id;
}
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
let length = request.length();
if buffer.remaining_mut() < length {
buffer.reserve(length);
}
let mut writer = buffer.writer();
request.to_bytes(&mut writer)
}
}
struct MultiplexedProto;
impl<T: AsyncRead + AsyncWrite + 'static> ClientProto<T> for MultiplexedProto {
type Request = TransportMessage;
type Response = TransportMessage; |
fn bind_transport(&self, io: T) -> Self::BindTransport {
Ok(io.framed(MultiplexedCodec { header: None }))
}
} | type Transport = Framed<T, MultiplexedCodec>;
type BindTransport = Result<Self::Transport, io::Error>; | random_line_split |
proto.rs |
use bytes::{BufMut, BytesMut};
use futures::Future;
use std::{io, str};
use std::net::SocketAddr;
use tokio_core::net::TcpStream;
use tokio_core::reactor::Handle;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::codec::{Decoder, Encoder, Framed};
use tokio_proto::TcpClient;
use tokio_proto::multiplex::{ClientProto, ClientService, RequestId};
use tokio_service::Service;
use unterflow_protocol::TransportMessage;
use unterflow_protocol::frame::DataFrameHeader;
use unterflow_protocol::io::{FromBytes, HasBlockLength, ToBytes};
pub struct | {
inner: ClientService<TcpStream, MultiplexedProto>,
}
impl MultiplexedClient {
pub fn connect(
addr: &SocketAddr,
handle: &Handle,
) -> Box<Future<Item = MultiplexedClient, Error = io::Error>> {
Box::new(TcpClient::new(MultiplexedProto).connect(addr, handle).map(
|service| MultiplexedClient { inner: service },
))
}
}
impl Service for MultiplexedClient {
type Request = TransportMessage;
type Response = TransportMessage;
type Error = io::Error;
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future {
Box::new(self.inner.call(request))
}
}
struct MultiplexedCodec {
header: Option<DataFrameHeader>,
}
impl MultiplexedCodec {
fn decode_frame(
&mut self,
header: DataFrameHeader,
buffer: &mut BytesMut,
) -> Result<Option<(RequestId, TransportMessage)>, io::Error> {
let frame_length = header.aligned_length() - DataFrameHeader::block_length() as usize;
if buffer.len() < frame_length {
self.header = Some(header);
Ok(None)
} else {
let frame = buffer.split_to(frame_length);
let mut reader = io::Cursor::new(frame);
let frame = TransportMessage::read(header, &mut reader)?;
let request_id = match frame {
TransportMessage::RequestResponse(ref r) => r.request_header.request_id,
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
Ok(Some((request_id as RequestId, frame)))
}
}
}
impl Decoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if let Some(header) = self.header.take() {
self.decode_frame(header, buffer)
} else {
let header_length = DataFrameHeader::block_length() as usize;
if buffer.len() < header_length {
Ok(None)
} else {
let header = buffer.split_to(header_length);
let mut reader = io::Cursor::new(header);
let header = DataFrameHeader::from_bytes(&mut reader)?;
self.decode_frame(header, buffer)
}
}
}
}
impl Encoder for MultiplexedCodec {
type Item = (RequestId, TransportMessage);
type Error = io::Error;
fn encode(&mut self, request: Self::Item, buffer: &mut BytesMut) -> Result<(), io::Error> {
let (request_id, mut request) = request;
match request {
TransportMessage::RequestResponse(ref mut r) => {
r.request_header.request_id = request_id;
}
r => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Expected request response message but got {:?}", r),
))
}
};
let length = request.length();
if buffer.remaining_mut() < length {
buffer.reserve(length);
}
let mut writer = buffer.writer();
request.to_bytes(&mut writer)
}
}
struct MultiplexedProto;
impl<T: AsyncRead + AsyncWrite + 'static> ClientProto<T> for MultiplexedProto {
type Request = TransportMessage;
type Response = TransportMessage;
type Transport = Framed<T, MultiplexedCodec>;
type BindTransport = Result<Self::Transport, io::Error>;
fn bind_transport(&self, io: T) -> Self::BindTransport {
Ok(io.framed(MultiplexedCodec { header: None }))
}
}
| MultiplexedClient | identifier_name |
expr-match-panic.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn | () {
let r = match true { true => { true } false => { panic!() } };
assert_eq!(r, true);
}
fn test_box() {
let r = match true { true => { vec!(10) } false => { panic!() } };
assert_eq!(r[0], 10);
}
pub fn main() { test_simple(); test_box(); }
| test_simple | identifier_name |
expr-match-panic.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn test_simple() {
let r = match true { true => | false => { panic!() } };
assert_eq!(r, true);
}
fn test_box() {
let r = match true { true => { vec!(10) } false => { panic!() } };
assert_eq!(r[0], 10);
}
pub fn main() { test_simple(); test_box(); }
| { true } | conditional_block |
expr-match-panic.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn test_simple() {
let r = match true { true => { true } false => { panic!() } };
assert_eq!(r, true);
}
fn test_box() { | }
pub fn main() { test_simple(); test_box(); } | let r = match true { true => { vec!(10) } false => { panic!() } };
assert_eq!(r[0], 10); | random_line_split |
expr-match-panic.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn test_simple() {
let r = match true { true => { true } false => { panic!() } };
assert_eq!(r, true);
}
fn test_box() |
pub fn main() { test_simple(); test_box(); }
| {
let r = match true { true => { vec!(10) } false => { panic!() } };
assert_eq!(r[0], 10);
} | identifier_body |
last-use-in-cap-clause.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Make sure #1399 stays fixed
struct | { a: ~int }
fn foo() -> @fn() -> int {
let k = ~22;
let _u = A {a: k.clone()};
let result: @fn() -> int = || 22;
result
}
pub fn main() {
assert!(foo()() == 22);
}
| A | identifier_name |
agent.rs | #![allow(non_snake_case)]
use std::collections::HashMap;
use request::Handler;
use serde_json;
use error::ConsulResult;
use std::error::Error;
use super::{Service, RegisterService, TtlHealthCheck};
/// Agent can be used to query the Agent endpoints
pub struct Agent{
handler: Handler
}
/// AgentMember represents a cluster member known to the agent
#[derive(Serialize, Deserialize)]
pub struct AgentMember {
Name: String,
Addr: String,
Port: u16,
Tags: HashMap<String, String>,
Status: usize,
ProtocolMin: u8,
ProtocolMax: u8,
ProtocolCur: u8,
DelegateMin: u8,
DelegateMax: u8,
DelegateCur: u8
}
impl Agent {
pub fn new(address: &str) -> Agent {
Agent {
handler: Handler::new(&format!("{}/v1/agent", address))
}
}
pub fn services(&self) -> ConsulResult<HashMap<String, Service>> {
let result = self.handler.get("services")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn members(&self) -> ConsulResult<Vec<AgentMember>> {
let result = self.handler.get("members")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn register(&self, service: RegisterService) -> ConsulResult<()> {
let json_str = serde_json::to_string(&service)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("service/register", json_str, Some("application/json")) {
Err(format!("Consul: Error registering a service. Err:{}", e))
}
else {
Ok(())
}
}
pub fn | (&self, health_check: TtlHealthCheck) -> ConsulResult<()> {
let json_str = serde_json::to_string(&health_check)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("check/register", json_str, Some("application/json")) {
Err(format!("Consul: Error registering a health check. Err:{}", e))
}
else {
Ok(())
}
}
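/// Reports the TTL check registered for `service_id` as passing via the agent's `check/pass` endpoint.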
pub fn check_pass(&self, service_id: String) -> ConsulResult<()> {
let uri = format!("check/pass/{}", service_id);
self.handler.get(&uri)?;
Ok(())
}
pub fn get_self_name(&self) -> ConsulResult<Option<String>> {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "NodeName"]))
}
pub fn get_self_address(&self) -> ConsulResult<Option<String>> {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "AdvertiseAddr"]))
}
}
| register_ttl_check | identifier_name |
agent.rs | #![allow(non_snake_case)]
use std::collections::HashMap;
use request::Handler;
use serde_json;
use error::ConsulResult;
use std::error::Error;
use super::{Service, RegisterService, TtlHealthCheck};
/// Agent can be used to query the Agent endpoints
pub struct Agent{
handler: Handler
}
/// AgentMember represents a cluster member known to the agent
#[derive(Serialize, Deserialize)]
pub struct AgentMember {
Name: String,
Addr: String,
Port: u16,
Tags: HashMap<String, String>,
Status: usize,
ProtocolMin: u8,
ProtocolMax: u8,
ProtocolCur: u8,
DelegateMin: u8,
DelegateMax: u8,
DelegateCur: u8
}
impl Agent {
pub fn new(address: &str) -> Agent {
Agent {
handler: Handler::new(&format!("{}/v1/agent", address))
}
}
pub fn services(&self) -> ConsulResult<HashMap<String, Service>> {
let result = self.handler.get("services")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn members(&self) -> ConsulResult<Vec<AgentMember>> {
let result = self.handler.get("members")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn register(&self, service: RegisterService) -> ConsulResult<()> {
let json_str = serde_json::to_string(&service)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("service/register", json_str, Some("application/json")) |
else {
Ok(())
}
}
pub fn register_ttl_check(&self, health_check: TtlHealthCheck) -> ConsulResult<()> {
let json_str = serde_json::to_string(&health_check)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("check/register", json_str, Some("application/json")) {
Err(format!("Consul: Error registering a health check. Err:{}", e))
}
else {
Ok(())
}
}
pub fn check_pass(&self, service_id: String) -> ConsulResult<()> {
let uri = format!("check/pass/{}", service_id);
self.handler.get(&uri)?;
Ok(())
}
pub fn get_self_name(&self) -> ConsulResult<Option<String>> {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "NodeName"]))
}
pub fn get_self_address(&self) -> ConsulResult<Option<String>> {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "AdvertiseAddr"]))
}
}
| {
Err(format!("Consul: Error registering a service. Err:{}", e))
} | conditional_block |
agent.rs | #![allow(non_snake_case)]
use std::collections::HashMap;
use request::Handler;
use serde_json;
use error::ConsulResult;
use std::error::Error;
use super::{Service, RegisterService, TtlHealthCheck};
/// Agent can be used to query the Agent endpoints
pub struct Agent{
handler: Handler
}
/// AgentMember represents a cluster member known to the agent
#[derive(Serialize, Deserialize)]
pub struct AgentMember {
Name: String,
Addr: String,
Port: u16,
Tags: HashMap<String, String>,
Status: usize,
ProtocolMin: u8,
ProtocolMax: u8,
ProtocolCur: u8,
DelegateMin: u8,
DelegateMax: u8,
DelegateCur: u8
}
impl Agent {
pub fn new(address: &str) -> Agent {
Agent {
handler: Handler::new(&format!("{}/v1/agent", address))
}
}
pub fn services(&self) -> ConsulResult<HashMap<String, Service>> {
let result = self.handler.get("services")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn members(&self) -> ConsulResult<Vec<AgentMember>> {
let result = self.handler.get("members")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn register(&self, service: RegisterService) -> ConsulResult<()> {
let json_str = serde_json::to_string(&service)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("service/register", json_str, Some("application/json")) {
Err(format!("Consul: Error registering a service. Err:{}", e))
}
else {
Ok(())
}
}
pub fn register_ttl_check(&self, health_check: TtlHealthCheck) -> ConsulResult<()> {
let json_str = serde_json::to_string(&health_check)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("check/register", json_str, Some("application/json")) {
Err(format!("Consul: Error registering a health check. Err:{}", e))
}
else {
Ok(())
}
}
pub fn check_pass(&self, service_id: String) -> ConsulResult<()> {
let uri = format!("check/pass/{}", service_id);
self.handler.get(&uri)?;
Ok(())
}
pub fn get_self_name(&self) -> ConsulResult<Option<String>> {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "NodeName"]))
}
pub fn get_self_address(&self) -> ConsulResult<Option<String>> |
}
| {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "AdvertiseAddr"]))
} | identifier_body |
agent.rs | #![allow(non_snake_case)]
use std::collections::HashMap;
use request::Handler;
use serde_json;
use error::ConsulResult;
use std::error::Error;
|
/// Agent can be used to query the Agent endpoints
pub struct Agent{
handler: Handler
}
/// AgentMember represents a cluster member known to the agent
#[derive(Serialize, Deserialize)]
pub struct AgentMember {
Name: String,
Addr: String,
Port: u16,
Tags: HashMap<String, String>,
Status: usize,
ProtocolMin: u8,
ProtocolMax: u8,
ProtocolCur: u8,
DelegateMin: u8,
DelegateMax: u8,
DelegateCur: u8
}
impl Agent {
pub fn new(address: &str) -> Agent {
Agent {
handler: Handler::new(&format!("{}/v1/agent", address))
}
}
pub fn services(&self) -> ConsulResult<HashMap<String, Service>> {
let result = self.handler.get("services")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn members(&self) -> ConsulResult<Vec<AgentMember>> {
let result = self.handler.get("members")?;
serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())
}
pub fn register(&self, service: RegisterService) -> ConsulResult<()> {
let json_str = serde_json::to_string(&service)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("service/register", json_str, Some("application/json")) {
Err(format!("Consul: Error registering a service. Err:{}", e))
}
else {
Ok(())
}
}
pub fn register_ttl_check(&self, health_check: TtlHealthCheck) -> ConsulResult<()> {
let json_str = serde_json::to_string(&health_check)
.map_err(|e| e.description().to_owned())?;
if let Err(e) = self.handler.put("check/register", json_str, Some("application/json")) {
Err(format!("Consul: Error registering a health check. Err:{}", e))
}
else {
Ok(())
}
}
pub fn check_pass(&self, service_id: String) -> ConsulResult<()> {
let uri = format!("check/pass/{}", service_id);
self.handler.get(&uri)?;
Ok(())
}
pub fn get_self_name(&self) -> ConsulResult<Option<String>> {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "NodeName"]))
}
pub fn get_self_address(&self) -> ConsulResult<Option<String>> {
let result = self.handler.get("self")?;
let json_data = serde_json::from_str(&result)
.map_err(|e| e.description().to_owned())?;
Ok(super::get_string(&json_data, &["Config", "AdvertiseAddr"]))
}
} | use super::{Service, RegisterService, TtlHealthCheck}; | random_line_split |
main.rs | // Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "rm"]
#![feature(path_ext)]
static VERS: &'static str = "0.1.0";
static PROG: &'static str = "rm";
extern crate getopts;
extern crate util;
use getopts::{Options};
use util::{Status};
use std::env;
use std::fs;
use std::fs::{PathExt};
use std::path::{PathBuf};
fn exists(path: &PathBuf) -> bool {
if !path.exists() {
util::err(PROG, Status::Error, String::from("cannot remove '".to_string()
+ path.to_str().unwrap() +
"': No such file or directory"
));
}
return true;
}
fn rm_file(file: &PathBuf, verbose: bool) {
match fs::remove_file(file) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", file.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
}
fn rm_dir(dir: &PathBuf, recurse: bool, verbose: bool) {
if recurse {
match fs::remove_dir_all(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
} else {
match fs::remove_dir(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
}
}
fn rm_target(path: &PathBuf, recurse: bool, verbose: bool) {
if exists(path) {
if path.is_file() {
rm_file(path, verbose);
} else if path.is_dir() |
}
}
fn print_usage(opts: Options) {
print!("Usage: rm [OPTION...] FILE...\n\
Remove FILE(s) {}\n\
Examples:
rm -r dir\tDeletes `dir` and all of its contents", opts.usage(""));
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("v", "verbose", "Verbosely print operation");
opts.optflag("r", "recursive", "Remove a directory and all of its contents");
opts.optflag("h", "help", "Print the help menu");
opts.optflag("", "version", "Print the version of rm");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
util::err(PROG, Status::OptError, e.to_string());
panic!(e.to_string())
}
};
let verb = matches.opt_present("v");
let recurse = matches.opt_present("r");
if matches.opt_present("h") {
print_usage(opts);
} else if matches.opt_present("version") {
util::copyright(PROG, VERS, "2015", vec!["Alberto Corona"]);
} else if !matches.free.is_empty() {
for item in matches.free.iter() {
rm_target(&PathBuf::from(&item), recurse, verb);
}
} else if matches.free.is_empty() {
util::prog_try(PROG);
}
}
| {
rm_dir(path, recurse, verbose);
} | conditional_block |
main.rs | // Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "rm"]
#![feature(path_ext)]
static VERS: &'static str = "0.1.0";
static PROG: &'static str = "rm";
extern crate getopts;
extern crate util;
use getopts::{Options};
use util::{Status};
use std::env;
use std::fs;
use std::fs::{PathExt};
use std::path::{PathBuf};
fn exists(path: &PathBuf) -> bool {
if !path.exists() {
util::err(PROG, Status::Error, String::from("cannot remove '".to_string()
+ path.to_str().unwrap() +
"': No such file or directory"
));
}
return true;
}
fn rm_file(file: &PathBuf, verbose: bool) |
fn rm_dir(dir: &PathBuf, recurse: bool, verbose: bool) {
if recurse {
match fs::remove_dir_all(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
} else {
match fs::remove_dir(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
}
}
fn rm_target(path: &PathBuf, recurse: bool, verbose: bool) {
if exists(path) {
if path.is_file() {
rm_file(path, verbose);
} else if path.is_dir() {
rm_dir(path, recurse, verbose);
}
}
}
fn print_usage(opts: Options) {
print!("Usage: rm [OPTION...] FILE...\n\
Remove FILE(s) {}\n\
Examples:
rm -r dir\tDeletes `dir` and all of its contents", opts.usage(""));
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("v", "verbose", "Verbosely print operation");
opts.optflag("r", "recursive", "Remove a directory and all of its contents");
opts.optflag("h", "help", "Print the help menu");
opts.optflag("", "version", "Print the version of rm");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
util::err(PROG, Status::OptError, e.to_string());
panic!(e.to_string())
}
};
let verb = matches.opt_present("v");
let recurse = matches.opt_present("r");
if matches.opt_present("h") {
print_usage(opts);
} else if matches.opt_present("version") {
util::copyright(PROG, VERS, "2015", vec!["Alberto Corona"]);
} else if !matches.free.is_empty() {
for item in matches.free.iter() {
rm_target(&PathBuf::from(&item), recurse, verb);
}
} else if matches.free.is_empty() {
util::prog_try(PROG);
}
}
| {
match fs::remove_file(file) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", file.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
} | identifier_body |
main.rs | // Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "rm"]
#![feature(path_ext)]
static VERS: &'static str = "0.1.0";
static PROG: &'static str = "rm";
extern crate getopts;
extern crate util;
use getopts::{Options};
use util::{Status};
use std::env;
use std::fs;
use std::fs::{PathExt};
use std::path::{PathBuf};
fn exists(path: &PathBuf) -> bool {
if !path.exists() {
util::err(PROG, Status::Error, String::from("cannot remove '".to_string()
+ path.to_str().unwrap() +
"': No such file or directory"
));
}
return true;
}
fn rm_file(file: &PathBuf, verbose: bool) {
match fs::remove_file(file) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", file.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
}
fn rm_dir(dir: &PathBuf, recurse: bool, verbose: bool) {
if recurse {
match fs::remove_dir_all(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
} else {
match fs::remove_dir(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
}
}
fn rm_target(path: &PathBuf, recurse: bool, verbose: bool) {
if exists(path) {
if path.is_file() {
rm_file(path, verbose);
} else if path.is_dir() {
rm_dir(path, recurse, verbose);
}
}
}
fn print_usage(opts: Options) {
print!("Usage: rm [OPTION...] FILE...\n\
Remove FILE(s) {}\n\
Examples:
rm -r dir\tDeletes `dir` and all of its contents", opts.usage(""));
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("v", "verbose", "Verbosely print operation");
opts.optflag("r", "recursive", "Remove a directory and all of its contents");
opts.optflag("h", "help", "Print the help menu");
opts.optflag("", "version", "Print the version of rm");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
util::err(PROG, Status::OptError, e.to_string());
panic!(e.to_string())
}
};
let verb = matches.opt_present("v");
let recurse = matches.opt_present("r");
if matches.opt_present("h") {
print_usage(opts); | for item in matches.free.iter() {
rm_target(&PathBuf::from(&item), recurse, verb);
}
} else if matches.free.is_empty() {
util::prog_try(PROG);
}
} | } else if matches.opt_present("version") {
util::copyright(PROG, VERS, "2015", vec!["Alberto Corona"]);
} else if !matches.free.is_empty() { | random_line_split |
main.rs | // Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "rm"]
#![feature(path_ext)]
static VERS: &'static str = "0.1.0";
static PROG: &'static str = "rm";
extern crate getopts;
extern crate util;
use getopts::{Options};
use util::{Status};
use std::env;
use std::fs;
use std::fs::{PathExt};
use std::path::{PathBuf};
fn exists(path: &PathBuf) -> bool {
if !path.exists() {
util::err(PROG, Status::Error, String::from("cannot remove '".to_string()
+ path.to_str().unwrap() +
"': No such file or directory"
));
}
return true;
}
fn | (file: &PathBuf, verbose: bool) {
match fs::remove_file(file) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", file.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
}
fn rm_dir(dir: &PathBuf, recurse: bool, verbose: bool) {
if recurse {
match fs::remove_dir_all(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
} else {
match fs::remove_dir(dir) {
Ok(_) => {
if verbose {
println!("Removed: '{}'", dir.display());
}
},
Err(e) => {
util::err(PROG, Status::Error, e.to_string());
panic!();
}
};
}
}
fn rm_target(path: &PathBuf, recurse: bool, verbose: bool) {
if exists(path) {
if path.is_file() {
rm_file(path, verbose);
} else if path.is_dir() {
rm_dir(path, recurse, verbose);
}
}
}
fn print_usage(opts: Options) {
print!("Usage: rm [OPTION...] FILE...\n\
Remove FILE(s) {}\n\
Examples:
rm -r dir\tDeletes `dir` and all of its contents", opts.usage(""));
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("v", "verbose", "Verbosely print operation");
opts.optflag("r", "recursive", "Remove a directory and all of its contents");
opts.optflag("h", "help", "Print the help menu");
opts.optflag("", "version", "Print the version of rm");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
util::err(PROG, Status::OptError, e.to_string());
panic!(e.to_string())
}
};
let verb = matches.opt_present("v");
let recurse = matches.opt_present("r");
if matches.opt_present("h") {
print_usage(opts);
} else if matches.opt_present("version") {
util::copyright(PROG, VERS, "2015", vec!["Alberto Corona"]);
} else if !matches.free.is_empty() {
for item in matches.free.iter() {
rm_target(&PathBuf::from(&item), recurse, verb);
}
} else if matches.free.is_empty() {
util::prog_try(PROG);
}
}
| rm_file | identifier_name |
draw.rs | use nannou::prelude::*;
fn main() |
fn view(app: &App, frame: Frame) {
// Begin drawing
let draw = app.draw();
// Clear the background to blue.
draw.background().color(CORNFLOWERBLUE);
// Draw a purple triangle in the top left half of the window.
let win = app.window_rect();
draw.tri()
.points(win.bottom_left(), win.top_left(), win.top_right())
.color(VIOLET);
// Draw an ellipse to follow the mouse.
let t = app.time;
draw.ellipse()
.x_y(app.mouse.x * t.cos(), app.mouse.y)
.radius(win.w() * 0.125 * t.sin())
.color(RED);
// Draw a line!
draw.line()
.weight(10.0 + (t.sin() * 0.5 + 0.5) * 90.0)
.caps_round()
.color(PALEGOLDENROD)
.points(win.top_left() * t.sin(), win.bottom_right() * t.cos());
// Draw a quad that follows the inverse of the ellipse.
draw.quad()
.x_y(-app.mouse.x, app.mouse.y)
.color(DARKGREEN)
.rotate(t);
// Draw a rect that follows a different inverse of the ellipse.
draw.rect()
.x_y(app.mouse.y, app.mouse.x)
.w(app.mouse.x * 0.25)
.hsv(t, 1.0, 1.0);
// Write the result of our drawing to the window's frame.
draw.to_frame(app, &frame).unwrap();
}
| {
nannou::sketch(view).run()
} | identifier_body |
draw.rs | use nannou::prelude::*;
fn main() {
nannou::sketch(view).run()
}
fn view(app: &App, frame: Frame) {
// Begin drawing
let draw = app.draw();
// Clear the background to blue.
draw.background().color(CORNFLOWERBLUE);
// Draw a purple triangle in the top left half of the window.
let win = app.window_rect();
draw.tri()
.points(win.bottom_left(), win.top_left(), win.top_right())
.color(VIOLET);
// Draw an ellipse to follow the mouse.
let t = app.time;
draw.ellipse()
.x_y(app.mouse.x * t.cos(), app.mouse.y) | draw.line()
.weight(10.0 + (t.sin() * 0.5 + 0.5) * 90.0)
.caps_round()
.color(PALEGOLDENROD)
.points(win.top_left() * t.sin(), win.bottom_right() * t.cos());
// Draw a quad that follows the inverse of the ellipse.
draw.quad()
.x_y(-app.mouse.x, app.mouse.y)
.color(DARKGREEN)
.rotate(t);
// Draw a rect that follows a different inverse of the ellipse.
draw.rect()
.x_y(app.mouse.y, app.mouse.x)
.w(app.mouse.x * 0.25)
.hsv(t, 1.0, 1.0);
// Write the result of our drawing to the window's frame.
draw.to_frame(app, &frame).unwrap();
} | .radius(win.w() * 0.125 * t.sin())
.color(RED);
// Draw a line! | random_line_split |
draw.rs | use nannou::prelude::*;
fn | () {
nannou::sketch(view).run()
}
fn view(app: &App, frame: Frame) {
// Begin drawing
let draw = app.draw();
// Clear the background to blue.
draw.background().color(CORNFLOWERBLUE);
// Draw a purple triangle in the top left half of the window.
let win = app.window_rect();
draw.tri()
.points(win.bottom_left(), win.top_left(), win.top_right())
.color(VIOLET);
// Draw an ellipse to follow the mouse.
let t = app.time;
draw.ellipse()
.x_y(app.mouse.x * t.cos(), app.mouse.y)
.radius(win.w() * 0.125 * t.sin())
.color(RED);
// Draw a line!
draw.line()
.weight(10.0 + (t.sin() * 0.5 + 0.5) * 90.0)
.caps_round()
.color(PALEGOLDENROD)
.points(win.top_left() * t.sin(), win.bottom_right() * t.cos());
// Draw a quad that follows the inverse of the ellipse.
draw.quad()
.x_y(-app.mouse.x, app.mouse.y)
.color(DARKGREEN)
.rotate(t);
// Draw a rect that follows a different inverse of the ellipse.
draw.rect()
.x_y(app.mouse.y, app.mouse.x)
.w(app.mouse.x * 0.25)
.hsv(t, 1.0, 1.0);
// Write the result of our drawing to the window's frame.
draw.to_frame(app, &frame).unwrap();
}
| main | identifier_name |
option-like-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print some
// gdb-check:$1 = {RUST$ENCODED$ENUM$0$None = {__0 = 0x12345678}}
// gdb-command:print none
// gdb-check:$2 = {RUST$ENCODED$ENUM$0$None = {__0 = 0x0}}
// gdb-command:print full
// gdb-check:$3 = {RUST$ENCODED$ENUM$1$Empty = {__0 = 454545, __1 = 0x87654321, __2 = 9988}}
// gdb-command:print empty_gdb->discr
// gdb-check:$4 = (isize *) 0x0
// gdb-command:print droid
// gdb-check:$5 = {RUST$ENCODED$ENUM$2$Void = {id = 675675, range = 10000001, internals = 0x43218765}}
// gdb-command:print void_droid_gdb->internals
// gdb-check:$6 = (isize *) 0x0
// gdb-command:print nested_non_zero_yep
// gdb-check:$7 = {RUST$ENCODED$ENUM$1$2$Nope = {__0 = 10.5, __1 = {a = 10, b = 20, c = [...]}}}
// gdb-command:print nested_non_zero_nope
// gdb-check:$8 = {RUST$ENCODED$ENUM$1$2$Nope = {__0 = [...], __1 = {a = [...], b = [...], c = 0x0}}}
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print some
// lldb-check:[...]$0 = Some(&0x12345678)
// lldb-command:print none
// lldb-check:[...]$1 = None
// lldb-command:print full
// lldb-check:[...]$2 = Full(454545, &0x87654321, 9988)
// lldb-command:print empty
// lldb-check:[...]$3 = Empty
// lldb-command:print droid
// lldb-check:[...]$4 = Droid { id: 675675, range: 10000001, internals: &0x43218765 }
// lldb-command:print void_droid
// lldb-check:[...]$5 = Void
// lldb-command:print some_str
// lldb-check:[...]$6 = Some(&str { data_ptr: [...], length: 3 })
// lldb-command:print none_str
// lldb-check:[...]$7 = None
// lldb-command:print nested_non_zero_yep
// lldb-check:[...]$8 = Yep(10.5, NestedNonZeroField { a: 10, b: 20, c: &[...] })
// lldb-command:print nested_non_zero_nope
// lldb-check:[...]$9 = Nope
#![omit_gdb_pretty_printer_section]
// If a struct has exactly two variants, one of them is empty, and the other one
// contains a non-nullable pointer, then this value is used as the discriminator.
// The test cases in this file make sure that something readable is generated for
// this kind of types.
// If the non-empty variant contains a single non-nullable pointer, then the whole
// item is represented as just a pointer and not wrapped in a struct.
// Unfortunately (for these test cases) the content of the non-discriminant fields
// in the null-case is not defined. So we just read the discriminator field in
// this case (by casting the value to a memory-equivalent struct).
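// Illustrative sketch (not part of the original test): the layout optimization described
// above means an option-like enum around a non-nullable reference needs no separate
// discriminant word, e.g.
//
//     enum MaybeRef<'a> { Yes(&'a u32), No }
//     // std::mem::size_of::<MaybeRef>() == std::mem::size_of::<&u32>() on typical
//     // targets, because the all-zeroes bit pattern is reused to encode `No`.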
enum MoreFields<'a> {
Full(u32, &'a isize, i16),
Empty
}
struct MoreFieldsRepr<'a> {
a: u32,
discr: &'a isize,
b: i16
}
enum NamedFields<'a> {
Droid { id: i32, range: i64, internals: &'a isize },
Void
}
struct NamedFieldsRepr<'a> {
id: i32,
range: i64,
internals: &'a isize
}
struct NestedNonZeroField<'a> {
a: u16,
b: u32,
c: &'a char,
}
enum NestedNonZero<'a> {
Yep(f64, NestedNonZeroField<'a>),
Nope
}
fn | () {
let some_str: Option<&'static str> = Some("abc");
let none_str: Option<&'static str> = None;
let some: Option<&u32> = Some(unsafe { std::mem::transmute(0x12345678_usize) });
let none: Option<&u32> = None;
let full = MoreFields::Full(454545, unsafe { std::mem::transmute(0x87654321_usize) }, 9988);
let empty = MoreFields::Empty;
let empty_gdb: &MoreFieldsRepr = unsafe { std::mem::transmute(&MoreFields::Empty) };
let droid = NamedFields::Droid {
id: 675675,
range: 10000001,
internals: unsafe { std::mem::transmute(0x43218765_usize) }
};
let void_droid = NamedFields::Void;
let void_droid_gdb: &NamedFieldsRepr = unsafe { std::mem::transmute(&NamedFields::Void) };
let x = 'x';
let nested_non_zero_yep = NestedNonZero::Yep(
10.5,
NestedNonZeroField {
a: 10,
b: 20,
c: &x
});
let nested_non_zero_nope = NestedNonZero::Nope;
zzz(); // #break
}
fn zzz() {()}
| main | identifier_name |
option-like-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print some
// gdb-check:$1 = {RUST$ENCODED$ENUM$0$None = {__0 = 0x12345678}}
// gdb-command:print none
// gdb-check:$2 = {RUST$ENCODED$ENUM$0$None = {__0 = 0x0}}
// gdb-command:print full
// gdb-check:$3 = {RUST$ENCODED$ENUM$1$Empty = {__0 = 454545, __1 = 0x87654321, __2 = 9988}}
// gdb-command:print empty_gdb->discr
// gdb-check:$4 = (isize *) 0x0
// gdb-command:print droid
// gdb-check:$5 = {RUST$ENCODED$ENUM$2$Void = {id = 675675, range = 10000001, internals = 0x43218765}}
// gdb-command:print void_droid_gdb->internals
// gdb-check:$6 = (isize *) 0x0
// gdb-command:print nested_non_zero_yep
// gdb-check:$7 = {RUST$ENCODED$ENUM$1$2$Nope = {__0 = 10.5, __1 = {a = 10, b = 20, c = [...]}}}
// gdb-command:print nested_non_zero_nope
// gdb-check:$8 = {RUST$ENCODED$ENUM$1$2$Nope = {__0 = [...], __1 = {a = [...], b = [...], c = 0x0}}}
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print some
// lldb-check:[...]$0 = Some(&0x12345678)
// lldb-command:print none
// lldb-check:[...]$1 = None
// lldb-command:print full
// lldb-check:[...]$2 = Full(454545, &0x87654321, 9988)
// lldb-command:print empty
// lldb-check:[...]$3 = Empty
// lldb-command:print droid
// lldb-check:[...]$4 = Droid { id: 675675, range: 10000001, internals: &0x43218765 }
// lldb-command:print void_droid
// lldb-check:[...]$5 = Void
// lldb-command:print some_str
// lldb-check:[...]$6 = Some(&str { data_ptr: [...], length: 3 })
// lldb-command:print none_str
// lldb-check:[...]$7 = None
// lldb-command:print nested_non_zero_yep
// lldb-check:[...]$8 = Yep(10.5, NestedNonZeroField { a: 10, b: 20, c: &[...] })
// lldb-command:print nested_non_zero_nope
// lldb-check:[...]$9 = Nope
#![omit_gdb_pretty_printer_section]
// If a struct has exactly two variants, one of them is empty, and the other one
// contains a non-nullable pointer, then this value is used as the discriminator.
// The test cases in this file make sure that something readable is generated for
// this kind of types.
// If the non-empty variant contains a single non-nullable pointer, then the whole
// item is represented as just a pointer and not wrapped in a struct.
// Unfortunately (for these test cases) the content of the non-discriminant fields
// in the null-case is not defined. So we just read the discriminator field in
// this case (by casting the value to a memory-equivalent struct).
enum MoreFields<'a> {
Full(u32, &'a isize, i16),
Empty
}
struct MoreFieldsRepr<'a> {
a: u32,
discr: &'a isize,
b: i16
}
enum NamedFields<'a> {
Droid { id: i32, range: i64, internals: &'a isize },
Void
}
struct NamedFieldsRepr<'a> {
id: i32,
range: i64,
internals: &'a isize
}
struct NestedNonZeroField<'a> {
a: u16,
b: u32,
c: &'a char,
}
enum NestedNonZero<'a> {
Yep(f64, NestedNonZeroField<'a>),
Nope
}
fn main() {
let some_str: Option<&'static str> = Some("abc");
let none_str: Option<&'static str> = None;
| let full = MoreFields::Full(454545, unsafe { std::mem::transmute(0x87654321_usize) }, 9988);
let empty = MoreFields::Empty;
let empty_gdb: &MoreFieldsRepr = unsafe { std::mem::transmute(&MoreFields::Empty) };
let droid = NamedFields::Droid {
id: 675675,
range: 10000001,
internals: unsafe { std::mem::transmute(0x43218765_usize) }
};
let void_droid = NamedFields::Void;
let void_droid_gdb: &NamedFieldsRepr = unsafe { std::mem::transmute(&NamedFields::Void) };
let x = 'x';
let nested_non_zero_yep = NestedNonZero::Yep(
10.5,
NestedNonZeroField {
a: 10,
b: 20,
c: &x
});
let nested_non_zero_nope = NestedNonZero::Nope;
zzz(); // #break
}
fn zzz() {()} | let some: Option<&u32> = Some(unsafe { std::mem::transmute(0x12345678_usize) });
let none: Option<&u32> = None;
| random_line_split |
option-like-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print some
// gdb-check:$1 = {RUST$ENCODED$ENUM$0$None = {__0 = 0x12345678}}
// gdb-command:print none
// gdb-check:$2 = {RUST$ENCODED$ENUM$0$None = {__0 = 0x0}}
// gdb-command:print full
// gdb-check:$3 = {RUST$ENCODED$ENUM$1$Empty = {__0 = 454545, __1 = 0x87654321, __2 = 9988}}
// gdb-command:print empty_gdb->discr
// gdb-check:$4 = (isize *) 0x0
// gdb-command:print droid
// gdb-check:$5 = {RUST$ENCODED$ENUM$2$Void = {id = 675675, range = 10000001, internals = 0x43218765}}
// gdb-command:print void_droid_gdb->internals
// gdb-check:$6 = (isize *) 0x0
// gdb-command:print nested_non_zero_yep
// gdb-check:$7 = {RUST$ENCODED$ENUM$1$2$Nope = {__0 = 10.5, __1 = {a = 10, b = 20, c = [...]}}}
// gdb-command:print nested_non_zero_nope
// gdb-check:$8 = {RUST$ENCODED$ENUM$1$2$Nope = {__0 = [...], __1 = {a = [...], b = [...], c = 0x0}}}
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print some
// lldb-check:[...]$0 = Some(&0x12345678)
// lldb-command:print none
// lldb-check:[...]$1 = None
// lldb-command:print full
// lldb-check:[...]$2 = Full(454545, &0x87654321, 9988)
// lldb-command:print empty
// lldb-check:[...]$3 = Empty
// lldb-command:print droid
// lldb-check:[...]$4 = Droid { id: 675675, range: 10000001, internals: &0x43218765 }
// lldb-command:print void_droid
// lldb-check:[...]$5 = Void
// lldb-command:print some_str
// lldb-check:[...]$6 = Some(&str { data_ptr: [...], length: 3 })
// lldb-command:print none_str
// lldb-check:[...]$7 = None
// lldb-command:print nested_non_zero_yep
// lldb-check:[...]$8 = Yep(10.5, NestedNonZeroField { a: 10, b: 20, c: &[...] })
// lldb-command:print nested_non_zero_nope
// lldb-check:[...]$9 = Nope
#![omit_gdb_pretty_printer_section]
// If a struct has exactly two variants, one of them is empty, and the other one
// contains a non-nullable pointer, then this value is used as the discriminator.
// The test cases in this file make sure that something readable is generated for
// this kind of types.
// If the non-empty variant contains a single non-nullable pointer, then the whole
// item is represented as just a pointer and not wrapped in a struct.
// Unfortunately (for these test cases) the content of the non-discriminant fields
// in the null-case is not defined. So we just read the discriminator field in
// this case (by casting the value to a memory-equivalent struct).
enum MoreFields<'a> {
Full(u32, &'a isize, i16),
Empty
}
struct MoreFieldsRepr<'a> {
a: u32,
discr: &'a isize,
b: i16
}
enum NamedFields<'a> {
Droid { id: i32, range: i64, internals: &'a isize },
Void
}
struct NamedFieldsRepr<'a> {
id: i32,
range: i64,
internals: &'a isize
}
struct NestedNonZeroField<'a> {
a: u16,
b: u32,
c: &'a char,
}
enum NestedNonZero<'a> {
Yep(f64, NestedNonZeroField<'a>),
Nope
}
fn main() {
let some_str: Option<&'static str> = Some("abc");
let none_str: Option<&'static str> = None;
let some: Option<&u32> = Some(unsafe { std::mem::transmute(0x12345678_usize) });
let none: Option<&u32> = None;
let full = MoreFields::Full(454545, unsafe { std::mem::transmute(0x87654321_usize) }, 9988);
let empty = MoreFields::Empty;
let empty_gdb: &MoreFieldsRepr = unsafe { std::mem::transmute(&MoreFields::Empty) };
let droid = NamedFields::Droid {
id: 675675,
range: 10000001,
internals: unsafe { std::mem::transmute(0x43218765_usize) }
};
let void_droid = NamedFields::Void;
let void_droid_gdb: &NamedFieldsRepr = unsafe { std::mem::transmute(&NamedFields::Void) };
let x = 'x';
let nested_non_zero_yep = NestedNonZero::Yep(
10.5,
NestedNonZeroField {
a: 10,
b: 20,
c: &x
});
let nested_non_zero_nope = NestedNonZero::Nope;
zzz(); // #break
}
fn zzz() | {()} | identifier_body |
|
position.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Generic types for CSS handling of specified and computed values of
//! [`position`](https://drafts.csswg.org/css-backgrounds-3/#position)
/// A generic type for representing a CSS [position](https://drafts.csswg.org/css-values/#position).
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToComputedValue,
ToResolvedValue,
ToShmem,
)]
#[repr(C)]
pub struct GenericPosition<H, V> {
/// The horizontal component of position.
pub horizontal: H,
/// The vertical component of position.
pub vertical: V,
}
pub use self::GenericPosition as Position;
impl<H, V> Position<H, V> {
/// Returns a new position.
pub fn new(horizontal: H, vertical: V) -> Self {
Self {
horizontal,
vertical,
}
}
}
/// A generic type for representing an `Auto | <position>`.
/// This is used by <offset-anchor> for now.
/// https://drafts.fxtf.org/motion-1/#offset-anchor-property
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
Parse,
PartialEq,
SpecifiedValueInfo,
ToAnimatedZero,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C, u8)]
pub enum GenericPositionOrAuto<Pos> {
/// The <position> value.
Position(Pos),
/// The keyword `auto`.
Auto,
}
pub use self::GenericPositionOrAuto as PositionOrAuto;
impl<Pos> PositionOrAuto<Pos> {
/// Return `auto`.
#[inline]
pub fn auto() -> Self {
PositionOrAuto::Auto
} | #[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
Parse,
SpecifiedValueInfo,
ToAnimatedZero,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C, u8)]
pub enum GenericZIndex<I> {
/// An integer value.
Integer(I),
/// The keyword `auto`.
Auto,
}
pub use self::GenericZIndex as ZIndex;
impl<Integer> ZIndex<Integer> {
/// Returns `auto`
#[inline]
pub fn auto() -> Self {
ZIndex::Auto
}
/// Returns whether `self` is `auto`.
#[inline]
pub fn is_auto(self) -> bool {
matches!(self, ZIndex::Auto)
}
/// Returns the integer value if it is an integer, or `auto`.
#[inline]
pub fn integer_or(self, auto: Integer) -> Integer {
match self {
ZIndex::Integer(n) => n,
ZIndex::Auto => auto,
}
}
} | }
/// A generic value for the `z-index` property. | random_line_split |
position.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Generic types for CSS handling of specified and computed values of
//! [`position`](https://drafts.csswg.org/css-backgrounds-3/#position)
/// A generic type for representing a CSS [position](https://drafts.csswg.org/css-values/#position).
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToComputedValue,
ToResolvedValue,
ToShmem,
)]
#[repr(C)]
pub struct | <H, V> {
/// The horizontal component of position.
pub horizontal: H,
/// The vertical component of position.
pub vertical: V,
}
pub use self::GenericPosition as Position;
impl<H, V> Position<H, V> {
/// Returns a new position.
pub fn new(horizontal: H, vertical: V) -> Self {
Self {
horizontal,
vertical,
}
}
}
/// A generic type for representing an `Auto | <position>`.
/// This is used by <offset-anchor> for now.
/// https://drafts.fxtf.org/motion-1/#offset-anchor-property
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
Parse,
PartialEq,
SpecifiedValueInfo,
ToAnimatedZero,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C, u8)]
pub enum GenericPositionOrAuto<Pos> {
/// The <position> value.
Position(Pos),
/// The keyword `auto`.
Auto,
}
pub use self::GenericPositionOrAuto as PositionOrAuto;
impl<Pos> PositionOrAuto<Pos> {
/// Return `auto`.
#[inline]
pub fn auto() -> Self {
PositionOrAuto::Auto
}
}
/// A generic value for the `z-index` property.
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
Parse,
SpecifiedValueInfo,
ToAnimatedZero,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C, u8)]
pub enum GenericZIndex<I> {
/// An integer value.
Integer(I),
/// The keyword `auto`.
Auto,
}
pub use self::GenericZIndex as ZIndex;
impl<Integer> ZIndex<Integer> {
/// Returns `auto`
#[inline]
pub fn auto() -> Self {
ZIndex::Auto
}
/// Returns whether `self` is `auto`.
#[inline]
pub fn is_auto(self) -> bool {
matches!(self, ZIndex::Auto)
}
/// Returns the integer value if it is an integer, or `auto`.
#[inline]
pub fn integer_or(self, auto: Integer) -> Integer {
match self {
ZIndex::Integer(n) => n,
ZIndex::Auto => auto,
}
}
}
| GenericPosition | identifier_name |
semantic_version.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Semantic version formatting and comparing.
/// A version value with strict meaning. Use `as_u32` to convert to a simple integer.
///
/// # Example
/// ```
/// extern crate util;
/// use util::semantic_version::*;
///
/// fn main() {
/// assert_eq!(SemanticVersion::new(1, 2, 3).as_u32(), 0x010203);
/// }
/// ```
pub struct | {
/// Major version - API/feature removals & breaking changes.
pub major: u8,
/// Minor version - API/feature additions.
pub minor: u8,
/// Tiny version - bug fixes.
pub tiny: u8,
}
impl SemanticVersion {
/// Create a new object.
pub fn new(major: u8, minor: u8, tiny: u8) -> SemanticVersion {
SemanticVersion {
major: major,
minor: minor,
tiny: tiny,
}
}
/// Convert to a `u32` representation.
pub fn as_u32(&self) -> u32 {
((self.major as u32) << 16) + ((self.minor as u32) << 8) + self.tiny as u32
}
}
// TODO: implement Eq, Comparison and Debug/Display for SemanticVersion.
| SemanticVersion | identifier_name |
semantic_version.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
| // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Semantic version formatting and comparing.
/// A version value with strict meaning. Use `as_u32` to convert to a simple integer.
///
/// # Example
/// ```
/// extern crate util;
/// use util::semantic_version::*;
///
/// fn main() {
/// assert_eq!(SemanticVersion::new(1, 2, 3).as_u32(), 0x010203);
/// }
/// ```
pub struct SemanticVersion {
/// Major version - API/feature removals & breaking changes.
pub major: u8,
/// Minor version - API/feature additions.
pub minor: u8,
/// Tiny version - bug fixes.
pub tiny: u8,
}
impl SemanticVersion {
/// Create a new object.
pub fn new(major: u8, minor: u8, tiny: u8) -> SemanticVersion {
SemanticVersion {
major: major,
minor: minor,
tiny: tiny,
}
}
/// Convert to a `u32` representation.
pub fn as_u32(&self) -> u32 {
((self.major as u32) << 16) + ((self.minor as u32) << 8) + self.tiny as u32
}
}
// TODO: implement Eq, Comparison and Debug/Display for SemanticVersion. | // You should have received a copy of the GNU General Public License | random_line_split |
rows.rs | use crate::{
Dao,
Value,
};
use serde_derive::{
Deserialize,
Serialize,
};
use std::slice;
/// use this to store data retrieved from the database
/// This is also slimmer than Vec<Dao> when serialized
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Rows {
pub columns: Vec<String>,
pub data: Vec<Vec<Value>>,
/// can be optionally set, indicates how many total rows are there in the table
pub count: Option<usize>,
}
impl Rows {
pub fn empty() -> Self { Rows::new(vec![]) }
pub fn new(columns: Vec<String>) -> Self {
Rows {
columns,
data: vec![],
count: None,
}
}
pub fn push(&mut self, row: Vec<Value>) { self.data.push(row) }
/// Returns an iterator over the `Row`s.
pub fn iter(&self) -> Iter {
Iter {
columns: self.columns.clone(),
iter: self.data.iter(),
}
}
}
/// An iterator over `Row`s.
pub struct Iter<'a> {
columns: Vec<String>,
iter: slice::Iter<'a, Vec<Value>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = Dao;
fn next(&mut self) -> Option<Dao> {
let next_row = self.iter.next();
if let Some(row) = next_row {
if !row.is_empty() {
let mut dao = Dao::new();
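// Pair each column name with the value at the same index to build the Dao.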
for (i, column) in self.columns.iter().enumerate() {
if let Some(value) = row.get(i) {
dao.insert_value(column, value);
}
}
Some(dao)
} else {
None
}
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<'a> ExactSizeIterator for Iter<'a> {}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn iteration_count() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(1, rows.iter().count());
}
#[test]
fn iteration_count2() |
#[test]
fn dao() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, rows.iter().next().unwrap());
}
#[test]
fn dao2() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
let mut iter = rows.iter();
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, iter.next().unwrap());
let mut dao2 = Dao::new();
dao2.insert("id", 2);
dao2.insert("username", "lee");
assert_eq!(dao2, iter.next().unwrap());
}
}
| {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(2, rows.iter().count());
} | identifier_body |
rows.rs | use crate::{
Dao,
Value,
};
use serde_derive::{
Deserialize,
Serialize,
};
use std::slice;
/// use this to store data retrieved from the database
/// This is also slimmer than Vec<Dao> when serialized
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Rows {
pub columns: Vec<String>,
pub data: Vec<Vec<Value>>,
/// can be optionally set, indicates how many total rows are there in the table
pub count: Option<usize>,
}
impl Rows {
pub fn empty() -> Self { Rows::new(vec![]) }
pub fn new(columns: Vec<String>) -> Self {
Rows {
columns,
data: vec![],
count: None,
}
}
pub fn push(&mut self, row: Vec<Value>) { self.data.push(row) }
/// Returns an iterator over the `Row`s.
pub fn iter(&self) -> Iter {
Iter {
columns: self.columns.clone(),
iter: self.data.iter(),
}
}
}
/// An iterator over `Row`s.
pub struct Iter<'a> {
columns: Vec<String>,
iter: slice::Iter<'a, Vec<Value>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = Dao;
fn next(&mut self) -> Option<Dao> {
let next_row = self.iter.next();
if let Some(row) = next_row {
if !row.is_empty() {
let mut dao = Dao::new();
for (i, column) in self.columns.iter().enumerate() {
if let Some(value) = row.get(i) |
}
Some(dao)
} else {
None
}
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<'a> ExactSizeIterator for Iter<'a> {}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn iteration_count() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(1, rows.iter().count());
}
#[test]
fn iteration_count2() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(2, rows.iter().count());
}
#[test]
fn dao() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, rows.iter().next().unwrap());
}
#[test]
fn dao2() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
let mut iter = rows.iter();
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, iter.next().unwrap());
let mut dao2 = Dao::new();
dao2.insert("id", 2);
dao2.insert("username", "lee");
assert_eq!(dao2, iter.next().unwrap());
}
}
| {
dao.insert_value(column, value);
} | conditional_block |
rows.rs | use crate::{
Dao,
Value,
};
use serde_derive::{
Deserialize,
Serialize,
};
use std::slice;
/// use this to store data retrieved from the database
/// This is also slimmer than Vec<Dao> when serialized
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Rows {
pub columns: Vec<String>,
pub data: Vec<Vec<Value>>,
/// can be optionally set, indicates how many total rows are there in the table
pub count: Option<usize>,
}
impl Rows {
pub fn empty() -> Self { Rows::new(vec![]) }
pub fn new(columns: Vec<String>) -> Self {
Rows {
columns,
data: vec![],
count: None,
}
}
pub fn push(&mut self, row: Vec<Value>) { self.data.push(row) }
/// Returns an iterator over the `Row`s.
pub fn iter(&self) -> Iter {
Iter {
columns: self.columns.clone(),
iter: self.data.iter(),
}
}
}
/// An iterator over `Row`s.
pub struct Iter<'a> {
columns: Vec<String>,
iter: slice::Iter<'a, Vec<Value>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = Dao;
fn next(&mut self) -> Option<Dao> {
let next_row = self.iter.next();
if let Some(row) = next_row {
if !row.is_empty() {
let mut dao = Dao::new();
for (i, column) in self.columns.iter().enumerate() {
if let Some(value) = row.get(i) {
dao.insert_value(column, value);
}
}
Some(dao)
} else {
None
}
} else {
None
}
}
fn | (&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<'a> ExactSizeIterator for Iter<'a> {}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn iteration_count() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(1, rows.iter().count());
}
#[test]
fn iteration_count2() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(2, rows.iter().count());
}
#[test]
fn dao() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, rows.iter().next().unwrap());
}
#[test]
fn dao2() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
let mut iter = rows.iter();
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, iter.next().unwrap());
let mut dao2 = Dao::new();
dao2.insert("id", 2);
dao2.insert("username", "lee");
assert_eq!(dao2, iter.next().unwrap());
}
}
| size_hint | identifier_name |
rows.rs | use crate::{
Dao,
Value,
};
use serde_derive::{
Deserialize,
Serialize,
};
use std::slice;
/// use this to store data retrieved from the database
/// This is also slimmer than Vec<Dao> when serialized
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Rows {
pub columns: Vec<String>,
pub data: Vec<Vec<Value>>,
/// can be optionally set, indicates how many total rows are there in the table
pub count: Option<usize>,
}
|
pub fn new(columns: Vec<String>) -> Self {
Rows {
columns,
data: vec![],
count: None,
}
}
pub fn push(&mut self, row: Vec<Value>) { self.data.push(row) }
/// Returns an iterator over the `Row`s.
pub fn iter(&self) -> Iter {
Iter {
columns: self.columns.clone(),
iter: self.data.iter(),
}
}
}
/// An iterator over `Row`s.
pub struct Iter<'a> {
columns: Vec<String>,
iter: slice::Iter<'a, Vec<Value>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = Dao;
fn next(&mut self) -> Option<Dao> {
let next_row = self.iter.next();
if let Some(row) = next_row {
if !row.is_empty() {
let mut dao = Dao::new();
for (i, column) in self.columns.iter().enumerate() {
if let Some(value) = row.get(i) {
dao.insert_value(column, value);
}
}
Some(dao)
} else {
None
}
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<'a> ExactSizeIterator for Iter<'a> {}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn iteration_count() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(1, rows.iter().count());
}
#[test]
fn iteration_count2() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
assert_eq!(2, rows.iter().count());
}
#[test]
fn dao() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()]];
let rows = Rows {
columns,
data,
count: None,
};
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, rows.iter().next().unwrap());
}
#[test]
fn dao2() {
let columns = vec!["id".to_string(), "username".to_string()];
let data: Vec<Vec<Value>> = vec![vec![1.into(), "ivanceras".into()], vec![
2.into(),
"lee".into(),
]];
let rows = Rows {
columns,
data,
count: None,
};
let mut iter = rows.iter();
let mut dao = Dao::new();
dao.insert("id", 1);
dao.insert("username", "ivanceras");
assert_eq!(dao, iter.next().unwrap());
let mut dao2 = Dao::new();
dao2.insert("id", 2);
dao2.insert("username", "lee");
assert_eq!(dao2, iter.next().unwrap());
}
} | impl Rows {
pub fn empty() -> Self { Rows::new(vec![]) } | random_line_split |
query14.rs | use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::difference::DiffPair;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Promotion Effect Query (Q14)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// 100.00 * sum(case
// when p_type like 'PROMO%'
// then l_extendedprice * (1 - l_discount)
// else 0
// end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
// from
// lineitem,
// part
// where
// l_partkey = p_partkey
// and l_shipdate >= date ':1'
// and l_shipdate < date ':1' + interval '1' month;
// :n -1
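// The dataflow below follows the SQL above: lineitems inside the one-month
// ship-date window contribute their discounted extended price as the update
// difference, parts contribute a (count, is-PROMO) DiffPair keyed by part_key,
// and the join followed by count_total accumulates total and promotional revenue.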
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else { None }
)
.arrange_by_self();
collections
.parts()
.explode(|p| Some((p.part_key, DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 }))))
.arrange_by_self()
.join_core(&lineitems, |&_part_key, _, _| Some(()))
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
| {
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else { None }
)
.arrange_by_self()
.join_core(&arrangements.part, |_pk,&(),p| Some(DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 })))
.explode(|dp| Some(((),dp)))
.count_total()
.probe_with(probe);
} | identifier_body |
|
query14.rs | use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::difference::DiffPair;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Promotion Effect Query (Q14)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// 100.00 * sum(case
// when p_type like 'PROMO%'
// then l_extendedprice * (1 - l_discount)
// else 0
// end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
// from
// lineitem,
// part
// where
// l_partkey = p_partkey
// and l_shipdate >= date ':1'
// and l_shipdate < date ':1' + interval '1' month;
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else { None }
)
.arrange_by_self();
collections
.parts()
.explode(|p| Some((p.part_key, DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 }))))
.arrange_by_self()
.join_core(&lineitems, |&_part_key, _, _| Some(()))
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else { None }
)
.arrange_by_self()
.join_core(&arrangements.part, |_pk,&(),p| Some(DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 })))
.explode(|dp| Some(((),dp)))
.count_total()
.probe_with(probe);
} | use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
| random_line_split |
|
query14.rs | use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::difference::DiffPair;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Promotion Effect Query (Q14)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// 100.00 * sum(case
// when p_type like 'PROMO%'
// then l_extendedprice * (1 - l_discount)
// else 0
// end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
// from
// lineitem,
// part
// where
// l_partkey = p_partkey
// and l_shipdate >= date ':1'
// and l_shipdate < date ':1' + interval '1' month;
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else { None }
)
.arrange_by_self();
collections
.parts()
.explode(|p| Some((p.part_key, DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 }))))
.arrange_by_self()
.join_core(&lineitems, |&_part_key, _, _| Some(()))
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else |
)
.arrange_by_self()
.join_core(&arrangements.part, |_pk,&(),p| Some(DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 })))
.explode(|dp| Some(((),dp)))
.count_total()
.probe_with(probe);
} | { None } | conditional_block |
query14.rs | use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::difference::DiffPair;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Promotion Effect Query (Q14)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// 100.00 * sum(case
// when p_type like 'PROMO%'
// then l_extendedprice * (1 - l_discount)
// else 0
// end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
// from
// lineitem,
// part
// where
// l_partkey = p_partkey
// and l_shipdate >= date ':1'
// and l_shipdate < date ':1' + interval '1' month;
// :n -1
fn | (source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else { None }
)
.arrange_by_self();
collections
.parts()
.explode(|p| Some((p.part_key, DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 }))))
.arrange_by_self()
.join_core(&lineitems, |&_part_key, _, _| Some(()))
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|l|
if create_date(1995,9,1) <= l.ship_date && l.ship_date < create_date(1995,10,1) {
Some((l.part_key, (l.extended_price * (100 - l.discount) / 100) as isize ))
}
else { None }
)
.arrange_by_self()
.join_core(&arrangements.part, |_pk,&(),p| Some(DiffPair::new(1, if starts_with(&p.typ.as_bytes(), b"PROMO") { 1 } else { 0 })))
.explode(|dp| Some(((),dp)))
.count_total()
.probe_with(probe);
} | starts_with | identifier_name |
request.rs | //! Client Requests
use std::marker::PhantomData;
use std::io::{self, Write, BufWriter};
use url::Url;
use method::{self, Method};
use header::Headers;
use header::{self, Host};
use net::{NetworkStream, NetworkConnector, HttpConnector, Fresh, Streaming};
use http::{HttpWriter, LINE_ENDING};
use http::HttpWriter::{ThroughWriter, ChunkedWriter, SizedWriter, EmptyWriter};
use version;
use HttpResult;
use client::{Response, get_host_and_port};
/// A client request to a remote server.
pub struct Request<W> {
/// The target URI for this request.
pub url: Url,
/// The HTTP version of this request.
pub version: version::HttpVersion,
body: HttpWriter<BufWriter<Box<NetworkStream + Send>>>,
headers: Headers,
method: method::Method,
_marker: PhantomData<W>,
}
impl<W> Request<W> {
/// Read the Request headers.
#[inline]
pub fn headers(&self) -> &Headers { &self.headers }
/// Read the Request method.
#[inline]
pub fn method(&self) -> method::Method { self.method.clone() }
}
impl Request<Fresh> {
/// Create a new client request.
pub fn new(method: method::Method, url: Url) -> HttpResult<Request<Fresh>> {
let mut conn = HttpConnector(None);
Request::with_connector(method, url, &mut conn)
}
/// Create a new client request with a specific underlying NetworkStream.
pub fn with_connector<C, S>(method: method::Method, url: Url, connector: &mut C)
-> HttpResult<Request<Fresh>> where
C: NetworkConnector<Stream=S>,
S: Into<Box<NetworkStream + Send>> {
debug!("{} {}", method, url);
let (host, port) = try!(get_host_and_port(&url));
let stream = try!(connector.connect(&*host, port, &*url.scheme)).into();
let stream = ThroughWriter(BufWriter::new(stream));
let mut headers = Headers::new();
headers.set(Host {
hostname: host,
port: Some(port),
});
Ok(Request {
method: method,
headers: headers,
url: url,
version: version::HttpVersion::Http11,
body: stream,
_marker: PhantomData,
})
}
/// Consume a Fresh Request, writing the headers and method,
/// returning a Streaming Request.
pub fn start(mut self) -> HttpResult<Request<Streaming>> {
let mut uri = self.url.serialize_path().unwrap();
//TODO: this needs a test
if let Some(ref q) = self.url.query {
uri.push('?');
uri.push_str(&q[..]);
}
debug!("writing head: {:?} {:?} {:?}", self.method, uri, self.version);
try!(write!(&mut self.body, "{} {} {}{}",
self.method, uri, self.version, LINE_ENDING));
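        // Choose the body writer: GET and HEAD requests carry no body and get an
        // EmptyWriter; other methods use a SizedWriter when a Content-Length header
        // is present and fall back to chunked transfer encoding otherwise.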
let stream = match self.method {
Method::Get | Method::Head => {
debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
EmptyWriter(self.body.into_inner())
},
_ => {
let mut chunked = true;
let mut len = 0;
match self.headers.get::<header::ContentLength>() {
Some(cl) => {
chunked = false;
len = **cl;
},
None => ()
};
                // can't do in match above, thanks borrowck
if chunked |
debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
if chunked {
ChunkedWriter(self.body.into_inner())
} else {
SizedWriter(self.body.into_inner(), len)
}
}
};
Ok(Request {
method: self.method,
headers: self.headers,
url: self.url,
version: self.version,
body: stream,
_marker: PhantomData,
})
}
/// Get a mutable reference to the Request headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut Headers { &mut self.headers }
}
impl Request<Streaming> {
/// Completes writing the request, and returns a response to read from.
///
/// Consumes the Request.
pub fn send(self) -> HttpResult<Response> {
let raw = try!(self.body.end()).into_inner().unwrap(); // end() already flushes
Response::new(raw)
}
}
impl Write for Request<Streaming> {
#[inline]
fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
self.body.write(msg)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.body.flush()
}
}
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use url::Url;
use method::Method::{Get, Head};
use mock::{MockStream, MockConnector};
use super::Request;
#[test]
fn test_get_empty_body() {
let req = Request::with_connector(
Get, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
#[test]
fn test_head_empty_body() {
let req = Request::with_connector(
Head, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
}
| {
let encodings = match self.headers.get_mut::<header::TransferEncoding>() {
Some(&mut header::TransferEncoding(ref mut encodings)) => {
//TODO: check if chunked is already in encodings. use HashSet?
encodings.push(header::Encoding::Chunked);
false
},
None => true
};
if encodings {
self.headers.set::<header::TransferEncoding>(
header::TransferEncoding(vec![header::Encoding::Chunked]))
}
} | conditional_block |
request.rs | //! Client Requests
use std::marker::PhantomData;
use std::io::{self, Write, BufWriter};
use url::Url;
use method::{self, Method};
use header::Headers;
use header::{self, Host};
use net::{NetworkStream, NetworkConnector, HttpConnector, Fresh, Streaming};
use http::{HttpWriter, LINE_ENDING};
use http::HttpWriter::{ThroughWriter, ChunkedWriter, SizedWriter, EmptyWriter};
use version;
use HttpResult;
use client::{Response, get_host_and_port};
/// A client request to a remote server.
pub struct Request<W> {
/// The target URI for this request.
pub url: Url,
/// The HTTP version of this request.
pub version: version::HttpVersion,
body: HttpWriter<BufWriter<Box<NetworkStream + Send>>>,
headers: Headers,
method: method::Method,
_marker: PhantomData<W>,
}
impl<W> Request<W> {
/// Read the Request headers.
#[inline]
pub fn headers(&self) -> &Headers { &self.headers }
/// Read the Request method.
#[inline]
pub fn method(&self) -> method::Method { self.method.clone() }
}
impl Request<Fresh> {
/// Create a new client request.
pub fn new(method: method::Method, url: Url) -> HttpResult<Request<Fresh>> {
let mut conn = HttpConnector(None);
Request::with_connector(method, url, &mut conn)
}
/// Create a new client request with a specific underlying NetworkStream.
pub fn with_connector<C, S>(method: method::Method, url: Url, connector: &mut C)
-> HttpResult<Request<Fresh>> where
C: NetworkConnector<Stream=S>,
S: Into<Box<NetworkStream + Send>> {
debug!("{} {}", method, url);
let (host, port) = try!(get_host_and_port(&url));
let stream = try!(connector.connect(&*host, port, &*url.scheme)).into();
let stream = ThroughWriter(BufWriter::new(stream));
let mut headers = Headers::new();
headers.set(Host {
hostname: host,
port: Some(port),
});
Ok(Request {
method: method,
headers: headers,
url: url,
version: version::HttpVersion::Http11,
body: stream,
_marker: PhantomData,
})
}
/// Consume a Fresh Request, writing the headers and method,
/// returning a Streaming Request.
pub fn start(mut self) -> HttpResult<Request<Streaming>> {
let mut uri = self.url.serialize_path().unwrap();
//TODO: this needs a test
if let Some(ref q) = self.url.query {
uri.push('?');
uri.push_str(&q[..]);
}
debug!("writing head: {:?} {:?} {:?}", self.method, uri, self.version);
try!(write!(&mut self.body, "{} {} {}{}",
self.method, uri, self.version, LINE_ENDING));
let stream = match self.method {
Method::Get | Method::Head => {
debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
EmptyWriter(self.body.into_inner())
},
_ => {
let mut chunked = true;
let mut len = 0;
match self.headers.get::<header::ContentLength>() {
Some(cl) => {
chunked = false;
len = **cl;
},
None => ()
};
                // can't do in match above, thanks borrowck
if chunked {
let encodings = match self.headers.get_mut::<header::TransferEncoding>() {
Some(&mut header::TransferEncoding(ref mut encodings)) => {
//TODO: check if chunked is already in encodings. use HashSet?
encodings.push(header::Encoding::Chunked);
false
},
None => true
};
if encodings {
self.headers.set::<header::TransferEncoding>(
header::TransferEncoding(vec![header::Encoding::Chunked]))
}
}
debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
if chunked {
ChunkedWriter(self.body.into_inner())
} else {
SizedWriter(self.body.into_inner(), len)
}
}
};
Ok(Request {
method: self.method,
headers: self.headers,
url: self.url,
version: self.version,
body: stream,
_marker: PhantomData,
})
}
/// Get a mutable reference to the Request headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut Headers |
}
impl Request<Streaming> {
/// Completes writing the request, and returns a response to read from.
///
/// Consumes the Request.
pub fn send(self) -> HttpResult<Response> {
let raw = try!(self.body.end()).into_inner().unwrap(); // end() already flushes
Response::new(raw)
}
}
impl Write for Request<Streaming> {
#[inline]
fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
self.body.write(msg)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.body.flush()
}
}
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use url::Url;
use method::Method::{Get, Head};
use mock::{MockStream, MockConnector};
use super::Request;
#[test]
fn test_get_empty_body() {
let req = Request::with_connector(
Get, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
#[test]
fn test_head_empty_body() {
let req = Request::with_connector(
Head, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
}
| { &mut self.headers } | identifier_body |
request.rs | //! Client Requests
use std::marker::PhantomData;
use std::io::{self, Write, BufWriter};
use url::Url;
use method::{self, Method};
use header::Headers;
use header::{self, Host};
use net::{NetworkStream, NetworkConnector, HttpConnector, Fresh, Streaming};
use http::{HttpWriter, LINE_ENDING};
use http::HttpWriter::{ThroughWriter, ChunkedWriter, SizedWriter, EmptyWriter};
use version;
use HttpResult;
use client::{Response, get_host_and_port};
/// A client request to a remote server.
pub struct Request<W> {
/// The target URI for this request.
pub url: Url,
/// The HTTP version of this request.
pub version: version::HttpVersion,
body: HttpWriter<BufWriter<Box<NetworkStream + Send>>>,
headers: Headers,
method: method::Method,
_marker: PhantomData<W>,
}
impl<W> Request<W> {
/// Read the Request headers.
#[inline]
pub fn headers(&self) -> &Headers { &self.headers }
/// Read the Request method.
#[inline]
pub fn method(&self) -> method::Method { self.method.clone() }
}
impl Request<Fresh> {
/// Create a new client request.
pub fn new(method: method::Method, url: Url) -> HttpResult<Request<Fresh>> {
let mut conn = HttpConnector(None);
Request::with_connector(method, url, &mut conn)
}
/// Create a new client request with a specific underlying NetworkStream.
pub fn with_connector<C, S>(method: method::Method, url: Url, connector: &mut C)
-> HttpResult<Request<Fresh>> where
C: NetworkConnector<Stream=S>,
S: Into<Box<NetworkStream + Send>> {
debug!("{} {}", method, url);
let (host, port) = try!(get_host_and_port(&url));
let stream = try!(connector.connect(&*host, port, &*url.scheme)).into();
let stream = ThroughWriter(BufWriter::new(stream));
let mut headers = Headers::new();
headers.set(Host {
hostname: host,
port: Some(port),
});
Ok(Request {
method: method,
headers: headers,
url: url,
version: version::HttpVersion::Http11,
body: stream,
_marker: PhantomData,
})
}
/// Consume a Fresh Request, writing the headers and method,
/// returning a Streaming Request.
pub fn start(mut self) -> HttpResult<Request<Streaming>> {
let mut uri = self.url.serialize_path().unwrap();
//TODO: this needs a test
if let Some(ref q) = self.url.query {
uri.push('?');
uri.push_str(&q[..]);
}
debug!("writing head: {:?} {:?} {:?}", self.method, uri, self.version);
try!(write!(&mut self.body, "{} {} {}{}",
self.method, uri, self.version, LINE_ENDING));
let stream = match self.method {
Method::Get | Method::Head => {
debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
EmptyWriter(self.body.into_inner())
},
_ => {
let mut chunked = true;
let mut len = 0;
match self.headers.get::<header::ContentLength>() {
Some(cl) => {
chunked = false;
len = **cl;
},
None => ()
};
                // can't do in match above, thanks borrowck
if chunked {
let encodings = match self.headers.get_mut::<header::TransferEncoding>() {
Some(&mut header::TransferEncoding(ref mut encodings)) => {
//TODO: check if chunked is already in encodings. use HashSet?
encodings.push(header::Encoding::Chunked);
false
},
None => true
};
if encodings {
self.headers.set::<header::TransferEncoding>(
header::TransferEncoding(vec![header::Encoding::Chunked]))
}
}
| debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
if chunked {
ChunkedWriter(self.body.into_inner())
} else {
SizedWriter(self.body.into_inner(), len)
}
}
};
Ok(Request {
method: self.method,
headers: self.headers,
url: self.url,
version: self.version,
body: stream,
_marker: PhantomData,
})
}
/// Get a mutable reference to the Request headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut Headers { &mut self.headers }
}
impl Request<Streaming> {
/// Completes writing the request, and returns a response to read from.
///
/// Consumes the Request.
pub fn send(self) -> HttpResult<Response> {
let raw = try!(self.body.end()).into_inner().unwrap(); // end() already flushes
Response::new(raw)
}
}
impl Write for Request<Streaming> {
#[inline]
fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
self.body.write(msg)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.body.flush()
}
}
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use url::Url;
use method::Method::{Get, Head};
use mock::{MockStream, MockConnector};
use super::Request;
#[test]
fn test_get_empty_body() {
let req = Request::with_connector(
Get, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
#[test]
fn test_head_empty_body() {
let req = Request::with_connector(
Head, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
} | random_line_split |
|
request.rs | //! Client Requests
use std::marker::PhantomData;
use std::io::{self, Write, BufWriter};
use url::Url;
use method::{self, Method};
use header::Headers;
use header::{self, Host};
use net::{NetworkStream, NetworkConnector, HttpConnector, Fresh, Streaming};
use http::{HttpWriter, LINE_ENDING};
use http::HttpWriter::{ThroughWriter, ChunkedWriter, SizedWriter, EmptyWriter};
use version;
use HttpResult;
use client::{Response, get_host_and_port};
/// A client request to a remote server.
pub struct Request<W> {
/// The target URI for this request.
pub url: Url,
/// The HTTP version of this request.
pub version: version::HttpVersion,
body: HttpWriter<BufWriter<Box<NetworkStream + Send>>>,
headers: Headers,
method: method::Method,
_marker: PhantomData<W>,
}
impl<W> Request<W> {
/// Read the Request headers.
#[inline]
pub fn headers(&self) -> &Headers { &self.headers }
/// Read the Request method.
#[inline]
pub fn method(&self) -> method::Method { self.method.clone() }
}
impl Request<Fresh> {
/// Create a new client request.
pub fn new(method: method::Method, url: Url) -> HttpResult<Request<Fresh>> {
let mut conn = HttpConnector(None);
Request::with_connector(method, url, &mut conn)
}
/// Create a new client request with a specific underlying NetworkStream.
pub fn with_connector<C, S>(method: method::Method, url: Url, connector: &mut C)
-> HttpResult<Request<Fresh>> where
C: NetworkConnector<Stream=S>,
S: Into<Box<NetworkStream + Send>> {
debug!("{} {}", method, url);
let (host, port) = try!(get_host_and_port(&url));
let stream = try!(connector.connect(&*host, port, &*url.scheme)).into();
let stream = ThroughWriter(BufWriter::new(stream));
let mut headers = Headers::new();
headers.set(Host {
hostname: host,
port: Some(port),
});
Ok(Request {
method: method,
headers: headers,
url: url,
version: version::HttpVersion::Http11,
body: stream,
_marker: PhantomData,
})
}
/// Consume a Fresh Request, writing the headers and method,
/// returning a Streaming Request.
pub fn start(mut self) -> HttpResult<Request<Streaming>> {
let mut uri = self.url.serialize_path().unwrap();
//TODO: this needs a test
if let Some(ref q) = self.url.query {
uri.push('?');
uri.push_str(&q[..]);
}
debug!("writing head: {:?} {:?} {:?}", self.method, uri, self.version);
try!(write!(&mut self.body, "{} {} {}{}",
self.method, uri, self.version, LINE_ENDING));
let stream = match self.method {
Method::Get | Method::Head => {
debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
EmptyWriter(self.body.into_inner())
},
_ => {
let mut chunked = true;
let mut len = 0;
match self.headers.get::<header::ContentLength>() {
Some(cl) => {
chunked = false;
len = **cl;
},
None => ()
};
                // can't do in match above, thanks borrowck
if chunked {
let encodings = match self.headers.get_mut::<header::TransferEncoding>() {
Some(&mut header::TransferEncoding(ref mut encodings)) => {
//TODO: check if chunked is already in encodings. use HashSet?
encodings.push(header::Encoding::Chunked);
false
},
None => true
};
if encodings {
self.headers.set::<header::TransferEncoding>(
header::TransferEncoding(vec![header::Encoding::Chunked]))
}
}
debug!("headers [\n{:?}]", self.headers);
try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
if chunked {
ChunkedWriter(self.body.into_inner())
} else {
SizedWriter(self.body.into_inner(), len)
}
}
};
Ok(Request {
method: self.method,
headers: self.headers,
url: self.url,
version: self.version,
body: stream,
_marker: PhantomData,
})
}
/// Get a mutable reference to the Request headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut Headers { &mut self.headers }
}
impl Request<Streaming> {
/// Completes writing the request, and returns a response to read from.
///
/// Consumes the Request.
pub fn send(self) -> HttpResult<Response> {
let raw = try!(self.body.end()).into_inner().unwrap(); // end() already flushes
Response::new(raw)
}
}
impl Write for Request<Streaming> {
#[inline]
fn | (&mut self, msg: &[u8]) -> io::Result<usize> {
self.body.write(msg)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.body.flush()
}
}
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use url::Url;
use method::Method::{Get, Head};
use mock::{MockStream, MockConnector};
use super::Request;
#[test]
fn test_get_empty_body() {
let req = Request::with_connector(
Get, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
#[test]
fn test_head_empty_body() {
let req = Request::with_connector(
Head, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let req = req.start().unwrap();
let stream = *req.body.end().unwrap()
.into_inner().unwrap().downcast::<MockStream>().ok().unwrap();
let bytes = stream.write;
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
}
| write | identifier_name |
lib.rs | //! Boron is a small and expressive web framework for Rust which aims to give a robust foundation
//! for web applications and APIs.
//!
//! ## Installation
//! Add the following line to your `[dependencies]` section in `Cargo.toml`:
//!
//! ```toml
//! boron = "0.0.2"
//! ```
//!
//! ## Your first app
//!
//! ```rust,no_run
//! extern crate boron;
//!
//! use boron::server::Boron;
//! use boron::request::Request;
//! use boron::response::Response;
//! use boron::router::HttpMethods;
//!
//! fn main() {
//! let mut app = Boron::new();
//! app.get("/", |req: &Request, res: Response| {
//! res.send(b"Hello World! I am Boron.")
//! });
//! app.listen("localhost:3000");
//! }
//! ```
extern crate hyper;
extern crate url; | pub mod server;
pub mod response;
pub mod request;
pub mod middleware;
pub mod router;
mod matcher; | extern crate regex;
extern crate typemap;
| random_line_split |
fun-call-variants.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn ho<F>(f: F) -> isize where F: FnOnce(isize) -> isize { let n: isize = f(3); return n; }
fn direct(x: isize) -> isize { return x + 1; }
| let b: isize = ho(direct); // indirect unbound
assert_eq!(a, b);
} | pub fn main() {
let a: isize = direct(3); // direct | random_line_split |