file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
TestClz.rs | /*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
char __attribute__((kernel)) testClzCharChar(char inValue) {
return clz(inValue);
}
char2 __attribute__((kernel)) testClzChar2Char2(char2 inValue) {
return clz(inValue);
}
char3 __attribute__((kernel)) testClzChar3Char3(char3 inValue) {
return clz(inValue);
}
char4 __attribute__((kernel)) testClzChar4Char4(char4 inValue) {
return clz(inValue);
}
uchar __attribute__((kernel)) testClzUcharUchar(uchar inValue) {
return clz(inValue);
}
uchar2 __attribute__((kernel)) testClzUchar2Uchar2(uchar2 inValue) {
return clz(inValue);
}
uchar3 __attribute__((kernel)) testClzUchar3Uchar3(uchar3 inValue) {
return clz(inValue);
}
uchar4 __attribute__((kernel)) testClzUchar4Uchar4(uchar4 inValue) {
return clz(inValue);
}
short __attribute__((kernel)) testClzShortShort(short inValue) {
return clz(inValue);
}
short2 __attribute__((kernel)) testClzShort2Short2(short2 inValue) {
return clz(inValue);
}
short3 __attribute__((kernel)) testClzShort3Short3(short3 inValue) {
return clz(inValue);
}
short4 __attribute__((kernel)) testClzShort4Short4(short4 inValue) {
return clz(inValue);
}
ushort __attribute__((kernel)) testClzUshortUshort(ushort inValue) {
return clz(inValue);
}
ushort2 __attribute__((kernel)) testClzUshort2Ushort2(ushort2 inValue) {
return clz(inValue);
}
ushort3 __attribute__((kernel)) testClzUshort3Ushort3(ushort3 inValue) {
return clz(inValue);
}
ushort4 __attribute__((kernel)) testClzUshort4Ushort4(ushort4 inValue) { | }
int2 __attribute__((kernel)) testClzInt2Int2(int2 inValue) {
return clz(inValue);
}
int3 __attribute__((kernel)) testClzInt3Int3(int3 inValue) {
return clz(inValue);
}
int4 __attribute__((kernel)) testClzInt4Int4(int4 inValue) {
return clz(inValue);
}
uint __attribute__((kernel)) testClzUintUint(uint inValue) {
return clz(inValue);
}
uint2 __attribute__((kernel)) testClzUint2Uint2(uint2 inValue) {
return clz(inValue);
}
uint3 __attribute__((kernel)) testClzUint3Uint3(uint3 inValue) {
return clz(inValue);
}
uint4 __attribute__((kernel)) testClzUint4Uint4(uint4 inValue) {
return clz(inValue);
} | return clz(inValue);
}
int __attribute__((kernel)) testClzIntInt(int inValue) {
return clz(inValue); | random_line_split |
type_variable.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::RelationDir::*;
use self::TypeVariableValue::*;
use self::UndoEntry::*;
use middle::ty::{mod, Ty};
use std::mem;
use util::snapshot_vec as sv;
pub struct TypeVariableTable<'tcx> {
values: sv::SnapshotVec<TypeVariableData<'tcx>,UndoEntry,Delegate>,
}
struct TypeVariableData<'tcx> {
value: TypeVariableValue<'tcx>,
diverging: bool
}
enum TypeVariableValue<'tcx> {
Known(Ty<'tcx>),
Bounded(Vec<Relation>),
}
pub struct Snapshot {
snapshot: sv::Snapshot
}
enum UndoEntry {
// The type of the var was specified.
SpecifyVar(ty::TyVid, Vec<Relation>),
Relate(ty::TyVid, ty::TyVid),
}
struct Delegate;
type Relation = (RelationDir, ty::TyVid);
#[deriving(PartialEq,Show)]
pub enum RelationDir {
SubtypeOf, SupertypeOf, EqTo
}
impl RelationDir {
fn opposite(self) -> RelationDir {
match self {
SubtypeOf => SupertypeOf,
SupertypeOf => SubtypeOf,
EqTo => EqTo
}
}
}
impl<'tcx> TypeVariableTable<'tcx> {
pub fn new() -> TypeVariableTable<'tcx> {
TypeVariableTable { values: sv::SnapshotVec::new(Delegate) }
}
fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
relations(self.values.get_mut(a.index))
}
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
self.values.get(vid.index).diverging
}
pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
/*!
* Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
*
* Precondition: neither `a` nor `b` are known.
*/
if a!= b {
self.relations(a).push((dir, b));
self.relations(b).push((dir.opposite(), a));
self.values.record(Relate(a, b));
}
}
pub fn instantiate_and_push(
&mut self,
vid: ty::TyVid,
ty: Ty<'tcx>,
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
/*! | * dir, vid1)` where `vid1` is some other variable id.
*/
let old_value = {
let value_ptr = &mut self.values.get_mut(vid.index).value;
mem::replace(value_ptr, Known(ty))
};
let relations = match old_value {
Bounded(b) => b,
Known(_) => panic!("Asked to instantiate variable that is \
already instantiated")
};
for &(dir, vid) in relations.iter() {
stack.push((ty, dir, vid));
}
self.values.record(SpecifyVar(vid, relations));
}
pub fn new_var(&mut self, diverging: bool) -> ty::TyVid {
let index = self.values.push(TypeVariableData {
value: Bounded(vec![]),
diverging: diverging
});
ty::TyVid { index: index }
}
pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
match self.values.get(vid.index).value {
Bounded(..) => None,
Known(t) => Some(t)
}
}
pub fn replace_if_possible(&self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
ty::ty_infer(ty::TyVar(v)) => {
match self.probe(v) {
None => t,
Some(u) => u
}
}
_ => t,
}
}
pub fn snapshot(&mut self) -> Snapshot {
Snapshot { snapshot: self.values.start_snapshot() }
}
pub fn rollback_to(&mut self, s: Snapshot) {
self.values.rollback_to(s.snapshot);
}
pub fn commit(&mut self, s: Snapshot) {
self.values.commit(s.snapshot);
}
}
impl<'tcx> sv::SnapshotVecDelegate<TypeVariableData<'tcx>,UndoEntry> for Delegate {
fn reverse(&mut self,
values: &mut Vec<TypeVariableData>,
action: UndoEntry) {
match action {
SpecifyVar(vid, relations) => {
values[vid.index].value = Bounded(relations);
}
Relate(a, b) => {
relations(&mut (*values)[a.index]).pop();
relations(&mut (*values)[b.index]).pop();
}
}
}
}
fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
match v.value {
Known(_) => panic!("var_sub_var: variable is known"),
Bounded(ref mut relations) => relations
}
} | * Instantiates `vid` with the type `ty` and then pushes an
* entry onto `stack` for each of the relations of `vid` to
* other variables. The relations will have the form `(ty, | random_line_split |
type_variable.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::RelationDir::*;
use self::TypeVariableValue::*;
use self::UndoEntry::*;
use middle::ty::{mod, Ty};
use std::mem;
use util::snapshot_vec as sv;
pub struct TypeVariableTable<'tcx> {
values: sv::SnapshotVec<TypeVariableData<'tcx>,UndoEntry,Delegate>,
}
struct TypeVariableData<'tcx> {
value: TypeVariableValue<'tcx>,
diverging: bool
}
enum TypeVariableValue<'tcx> {
Known(Ty<'tcx>),
Bounded(Vec<Relation>),
}
pub struct Snapshot {
snapshot: sv::Snapshot
}
enum UndoEntry {
// The type of the var was specified.
SpecifyVar(ty::TyVid, Vec<Relation>),
Relate(ty::TyVid, ty::TyVid),
}
struct Delegate;
type Relation = (RelationDir, ty::TyVid);
#[deriving(PartialEq,Show)]
pub enum RelationDir {
SubtypeOf, SupertypeOf, EqTo
}
impl RelationDir {
fn opposite(self) -> RelationDir {
match self {
SubtypeOf => SupertypeOf,
SupertypeOf => SubtypeOf,
EqTo => EqTo
}
}
}
impl<'tcx> TypeVariableTable<'tcx> {
pub fn new() -> TypeVariableTable<'tcx> {
TypeVariableTable { values: sv::SnapshotVec::new(Delegate) }
}
fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
relations(self.values.get_mut(a.index))
}
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
self.values.get(vid.index).diverging
}
pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
/*!
* Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
*
* Precondition: neither `a` nor `b` are known.
*/
if a!= b {
self.relations(a).push((dir, b));
self.relations(b).push((dir.opposite(), a));
self.values.record(Relate(a, b));
}
}
pub fn instantiate_and_push(
&mut self,
vid: ty::TyVid,
ty: Ty<'tcx>,
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
/*!
* Instantiates `vid` with the type `ty` and then pushes an
* entry onto `stack` for each of the relations of `vid` to
* other variables. The relations will have the form `(ty,
* dir, vid1)` where `vid1` is some other variable id.
*/
let old_value = {
let value_ptr = &mut self.values.get_mut(vid.index).value;
mem::replace(value_ptr, Known(ty))
};
let relations = match old_value {
Bounded(b) => b,
Known(_) => panic!("Asked to instantiate variable that is \
already instantiated")
};
for &(dir, vid) in relations.iter() {
stack.push((ty, dir, vid));
}
self.values.record(SpecifyVar(vid, relations));
}
pub fn new_var(&mut self, diverging: bool) -> ty::TyVid {
let index = self.values.push(TypeVariableData {
value: Bounded(vec![]),
diverging: diverging
});
ty::TyVid { index: index }
}
pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> |
pub fn replace_if_possible(&self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
ty::ty_infer(ty::TyVar(v)) => {
match self.probe(v) {
None => t,
Some(u) => u
}
}
_ => t,
}
}
pub fn snapshot(&mut self) -> Snapshot {
Snapshot { snapshot: self.values.start_snapshot() }
}
pub fn rollback_to(&mut self, s: Snapshot) {
self.values.rollback_to(s.snapshot);
}
pub fn commit(&mut self, s: Snapshot) {
self.values.commit(s.snapshot);
}
}
impl<'tcx> sv::SnapshotVecDelegate<TypeVariableData<'tcx>,UndoEntry> for Delegate {
fn reverse(&mut self,
values: &mut Vec<TypeVariableData>,
action: UndoEntry) {
match action {
SpecifyVar(vid, relations) => {
values[vid.index].value = Bounded(relations);
}
Relate(a, b) => {
relations(&mut (*values)[a.index]).pop();
relations(&mut (*values)[b.index]).pop();
}
}
}
}
fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
match v.value {
Known(_) => panic!("var_sub_var: variable is known"),
Bounded(ref mut relations) => relations
}
}
| {
match self.values.get(vid.index).value {
Bounded(..) => None,
Known(t) => Some(t)
}
} | identifier_body |
type_variable.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::RelationDir::*;
use self::TypeVariableValue::*;
use self::UndoEntry::*;
use middle::ty::{mod, Ty};
use std::mem;
use util::snapshot_vec as sv;
pub struct TypeVariableTable<'tcx> {
values: sv::SnapshotVec<TypeVariableData<'tcx>,UndoEntry,Delegate>,
}
struct TypeVariableData<'tcx> {
value: TypeVariableValue<'tcx>,
diverging: bool
}
enum TypeVariableValue<'tcx> {
Known(Ty<'tcx>),
Bounded(Vec<Relation>),
}
pub struct Snapshot {
snapshot: sv::Snapshot
}
enum UndoEntry {
// The type of the var was specified.
SpecifyVar(ty::TyVid, Vec<Relation>),
Relate(ty::TyVid, ty::TyVid),
}
struct Delegate;
type Relation = (RelationDir, ty::TyVid);
#[deriving(PartialEq,Show)]
pub enum RelationDir {
SubtypeOf, SupertypeOf, EqTo
}
impl RelationDir {
fn opposite(self) -> RelationDir {
match self {
SubtypeOf => SupertypeOf,
SupertypeOf => SubtypeOf,
EqTo => EqTo
}
}
}
impl<'tcx> TypeVariableTable<'tcx> {
pub fn new() -> TypeVariableTable<'tcx> {
TypeVariableTable { values: sv::SnapshotVec::new(Delegate) }
}
fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
relations(self.values.get_mut(a.index))
}
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
self.values.get(vid.index).diverging
}
pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
/*!
* Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
*
* Precondition: neither `a` nor `b` are known.
*/
if a!= b {
self.relations(a).push((dir, b));
self.relations(b).push((dir.opposite(), a));
self.values.record(Relate(a, b));
}
}
pub fn instantiate_and_push(
&mut self,
vid: ty::TyVid,
ty: Ty<'tcx>,
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
/*!
* Instantiates `vid` with the type `ty` and then pushes an
* entry onto `stack` for each of the relations of `vid` to
* other variables. The relations will have the form `(ty,
* dir, vid1)` where `vid1` is some other variable id.
*/
let old_value = {
let value_ptr = &mut self.values.get_mut(vid.index).value;
mem::replace(value_ptr, Known(ty))
};
let relations = match old_value {
Bounded(b) => b,
Known(_) => panic!("Asked to instantiate variable that is \
already instantiated")
};
for &(dir, vid) in relations.iter() {
stack.push((ty, dir, vid));
}
self.values.record(SpecifyVar(vid, relations));
}
pub fn new_var(&mut self, diverging: bool) -> ty::TyVid {
let index = self.values.push(TypeVariableData {
value: Bounded(vec![]),
diverging: diverging
});
ty::TyVid { index: index }
}
pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
match self.values.get(vid.index).value {
Bounded(..) => None,
Known(t) => Some(t)
}
}
pub fn replace_if_possible(&self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
ty::ty_infer(ty::TyVar(v)) => {
match self.probe(v) {
None => t,
Some(u) => u
}
}
_ => t,
}
}
pub fn | (&mut self) -> Snapshot {
Snapshot { snapshot: self.values.start_snapshot() }
}
pub fn rollback_to(&mut self, s: Snapshot) {
self.values.rollback_to(s.snapshot);
}
pub fn commit(&mut self, s: Snapshot) {
self.values.commit(s.snapshot);
}
}
impl<'tcx> sv::SnapshotVecDelegate<TypeVariableData<'tcx>,UndoEntry> for Delegate {
fn reverse(&mut self,
values: &mut Vec<TypeVariableData>,
action: UndoEntry) {
match action {
SpecifyVar(vid, relations) => {
values[vid.index].value = Bounded(relations);
}
Relate(a, b) => {
relations(&mut (*values)[a.index]).pop();
relations(&mut (*values)[b.index]).pop();
}
}
}
}
fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
match v.value {
Known(_) => panic!("var_sub_var: variable is known"),
Bounded(ref mut relations) => relations
}
}
| snapshot | identifier_name |
domparser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::DOMParserBinding;
use dom::bindings::codegen::DOMParserBinding::SupportedTypeValues::{Text_html, Text_xml};
use dom::bindings::utils::{DOMString, Fallible, Reflector, Reflectable, reflect_dom_object};
use dom::document::{AbstractDocument, Document, XML};
use dom::element::HTMLHtmlElementTypeId;
use dom::htmldocument::HTMLDocument;
use dom::htmlelement::HTMLElement;
use dom::htmlhtmlelement::HTMLHtmlElement;
use dom::node::Node;
use dom::window::Window;
use js::jsapi::{JSContext, JSObject};
pub struct DOMParser {
owner: @mut Window, //XXXjdm Document instead?
reflector_: Reflector
}
impl DOMParser {
pub fn new_inherited(owner: @mut Window) -> DOMParser {
DOMParser {
owner: owner,
reflector_: Reflector::new()
}
}
pub fn new(owner: @mut Window) -> @mut DOMParser {
reflect_dom_object(@mut DOMParser::new_inherited(owner), owner,
DOMParserBinding::Wrap)
}
pub fn Constructor(owner: @mut Window) -> Fallible<@mut DOMParser> {
Ok(DOMParser::new(owner))
}
pub fn ParseFromString(&self,
_s: &DOMString,
ty: DOMParserBinding::SupportedType)
-> Fallible<AbstractDocument> {
let cx = self.owner.get_cx();
let document = match ty {
Text_html => {
HTMLDocument::new(self.owner)
}
Text_xml => {
AbstractDocument::as_abstract(cx, @mut Document::new(self.owner, XML))
}
_ => {
fail!("unsupported document type")
}
};
let root = @HTMLHtmlElement {
htmlelement: HTMLElement::new(HTMLHtmlElementTypeId, ~"html", document)
};
let root = unsafe { Node::as_abstract_node(cx, root) };
document.set_root(root);
Ok(document)
}
}
impl Reflectable for DOMParser {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector { | unreachable!();
}
fn GetParentObject(&self, _cx: *JSContext) -> Option<@mut Reflectable> {
Some(self.owner as @mut Reflectable)
}
} | &mut self.reflector_
}
fn wrap_object_shared(@mut self, _cx: *JSContext, _scope: *JSObject) -> *JSObject { | random_line_split |
domparser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::DOMParserBinding;
use dom::bindings::codegen::DOMParserBinding::SupportedTypeValues::{Text_html, Text_xml};
use dom::bindings::utils::{DOMString, Fallible, Reflector, Reflectable, reflect_dom_object};
use dom::document::{AbstractDocument, Document, XML};
use dom::element::HTMLHtmlElementTypeId;
use dom::htmldocument::HTMLDocument;
use dom::htmlelement::HTMLElement;
use dom::htmlhtmlelement::HTMLHtmlElement;
use dom::node::Node;
use dom::window::Window;
use js::jsapi::{JSContext, JSObject};
pub struct DOMParser {
owner: @mut Window, //XXXjdm Document instead?
reflector_: Reflector
}
impl DOMParser {
pub fn new_inherited(owner: @mut Window) -> DOMParser {
DOMParser {
owner: owner,
reflector_: Reflector::new()
}
}
pub fn new(owner: @mut Window) -> @mut DOMParser {
reflect_dom_object(@mut DOMParser::new_inherited(owner), owner,
DOMParserBinding::Wrap)
}
pub fn Constructor(owner: @mut Window) -> Fallible<@mut DOMParser> {
Ok(DOMParser::new(owner))
}
pub fn ParseFromString(&self,
_s: &DOMString,
ty: DOMParserBinding::SupportedType)
-> Fallible<AbstractDocument> {
let cx = self.owner.get_cx();
let document = match ty {
Text_html => |
Text_xml => {
AbstractDocument::as_abstract(cx, @mut Document::new(self.owner, XML))
}
_ => {
fail!("unsupported document type")
}
};
let root = @HTMLHtmlElement {
htmlelement: HTMLElement::new(HTMLHtmlElementTypeId, ~"html", document)
};
let root = unsafe { Node::as_abstract_node(cx, root) };
document.set_root(root);
Ok(document)
}
}
impl Reflectable for DOMParser {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector {
&mut self.reflector_
}
fn wrap_object_shared(@mut self, _cx: *JSContext, _scope: *JSObject) -> *JSObject {
unreachable!();
}
fn GetParentObject(&self, _cx: *JSContext) -> Option<@mut Reflectable> {
Some(self.owner as @mut Reflectable)
}
}
| {
HTMLDocument::new(self.owner)
} | conditional_block |
domparser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::DOMParserBinding;
use dom::bindings::codegen::DOMParserBinding::SupportedTypeValues::{Text_html, Text_xml};
use dom::bindings::utils::{DOMString, Fallible, Reflector, Reflectable, reflect_dom_object};
use dom::document::{AbstractDocument, Document, XML};
use dom::element::HTMLHtmlElementTypeId;
use dom::htmldocument::HTMLDocument;
use dom::htmlelement::HTMLElement;
use dom::htmlhtmlelement::HTMLHtmlElement;
use dom::node::Node;
use dom::window::Window;
use js::jsapi::{JSContext, JSObject};
pub struct DOMParser {
owner: @mut Window, //XXXjdm Document instead?
reflector_: Reflector
}
impl DOMParser {
pub fn new_inherited(owner: @mut Window) -> DOMParser |
pub fn new(owner: @mut Window) -> @mut DOMParser {
reflect_dom_object(@mut DOMParser::new_inherited(owner), owner,
DOMParserBinding::Wrap)
}
pub fn Constructor(owner: @mut Window) -> Fallible<@mut DOMParser> {
Ok(DOMParser::new(owner))
}
pub fn ParseFromString(&self,
_s: &DOMString,
ty: DOMParserBinding::SupportedType)
-> Fallible<AbstractDocument> {
let cx = self.owner.get_cx();
let document = match ty {
Text_html => {
HTMLDocument::new(self.owner)
}
Text_xml => {
AbstractDocument::as_abstract(cx, @mut Document::new(self.owner, XML))
}
_ => {
fail!("unsupported document type")
}
};
let root = @HTMLHtmlElement {
htmlelement: HTMLElement::new(HTMLHtmlElementTypeId, ~"html", document)
};
let root = unsafe { Node::as_abstract_node(cx, root) };
document.set_root(root);
Ok(document)
}
}
impl Reflectable for DOMParser {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector {
&mut self.reflector_
}
fn wrap_object_shared(@mut self, _cx: *JSContext, _scope: *JSObject) -> *JSObject {
unreachable!();
}
fn GetParentObject(&self, _cx: *JSContext) -> Option<@mut Reflectable> {
Some(self.owner as @mut Reflectable)
}
}
| {
DOMParser {
owner: owner,
reflector_: Reflector::new()
}
} | identifier_body |
domparser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::DOMParserBinding;
use dom::bindings::codegen::DOMParserBinding::SupportedTypeValues::{Text_html, Text_xml};
use dom::bindings::utils::{DOMString, Fallible, Reflector, Reflectable, reflect_dom_object};
use dom::document::{AbstractDocument, Document, XML};
use dom::element::HTMLHtmlElementTypeId;
use dom::htmldocument::HTMLDocument;
use dom::htmlelement::HTMLElement;
use dom::htmlhtmlelement::HTMLHtmlElement;
use dom::node::Node;
use dom::window::Window;
use js::jsapi::{JSContext, JSObject};
pub struct DOMParser {
owner: @mut Window, //XXXjdm Document instead?
reflector_: Reflector
}
impl DOMParser {
pub fn new_inherited(owner: @mut Window) -> DOMParser {
DOMParser {
owner: owner,
reflector_: Reflector::new()
}
}
pub fn new(owner: @mut Window) -> @mut DOMParser {
reflect_dom_object(@mut DOMParser::new_inherited(owner), owner,
DOMParserBinding::Wrap)
}
pub fn Constructor(owner: @mut Window) -> Fallible<@mut DOMParser> {
Ok(DOMParser::new(owner))
}
pub fn ParseFromString(&self,
_s: &DOMString,
ty: DOMParserBinding::SupportedType)
-> Fallible<AbstractDocument> {
let cx = self.owner.get_cx();
let document = match ty {
Text_html => {
HTMLDocument::new(self.owner)
}
Text_xml => {
AbstractDocument::as_abstract(cx, @mut Document::new(self.owner, XML))
}
_ => {
fail!("unsupported document type")
}
};
let root = @HTMLHtmlElement {
htmlelement: HTMLElement::new(HTMLHtmlElementTypeId, ~"html", document)
};
let root = unsafe { Node::as_abstract_node(cx, root) };
document.set_root(root);
Ok(document)
}
}
impl Reflectable for DOMParser {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector {
&mut self.reflector_
}
fn wrap_object_shared(@mut self, _cx: *JSContext, _scope: *JSObject) -> *JSObject {
unreachable!();
}
fn | (&self, _cx: *JSContext) -> Option<@mut Reflectable> {
Some(self.owner as @mut Reflectable)
}
}
| GetParentObject | identifier_name |
coercion.rs | mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) {... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
use check::{autoderef, FnCtxt, NoPreference, PreferMutLvalue, UnresolvedTypeAction};
use middle::infer::{self, Coercion};
use middle::traits::{self, ObligationCause};
use middle::traits::{predicate_for_trait_def, report_selection_error};
use middle::ty::{AutoDerefRef, AdjustDerefRef};
use middle::ty::{self, mt, Ty};
use middle::ty_relate::RelateResult;
use util::common::indent;
use util::ppaux::Repr;
use std::cell::RefCell;
use std::collections::VecDeque;
use syntax::ast;
struct Coerce<'a, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'tcx>,
origin: infer::TypeOrigin,
unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
}
type CoerceResult<'tcx> = RelateResult<'tcx, Option<ty::AutoAdjustment<'tcx>>>;
impl<'f, 'tcx> Coerce<'f, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.fcx.tcx()
}
fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b));
Ok(None) // No coercion required.
}
fn unpack_actual_value<T, F>(&self, a: Ty<'tcx>, f: F) -> T where
F: FnOnce(Ty<'tcx>) -> T,
{
f(self.fcx.infcx().shallow_resolve(a))
}
fn coerce(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("Coerce.tys({} => {})",
a.repr(self.tcx()),
b.repr(self.tcx()));
// Consider coercing the subtype to a DST
let unsize = self.unpack_actual_value(a, |a| {
self.coerce_unsized(a, b)
});
if unsize.is_ok() {
return unsize;
}
// Examine the supertype and consider auto-borrowing.
//
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
ty::ty_ptr(mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_unsafe_ptr(a, b, mt_b.mutbl)
});
}
ty::ty_rptr(_, mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl)
});
}
_ => {}
}
self.unpack_actual_value(a, |a| {
match a.sty {
ty::ty_bare_fn(Some(_), a_f) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
self.coerce_from_fn_item(a, a_f, b)
}
ty::ty_bare_fn(None, a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
_ => {
// Otherwise, just use subtyping rules.
self.subtype(a, b)
}
}
})
}
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
fn coerce_borrowed_pointer(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_borrowed_pointer(a={}, b={})",
a.repr(self.tcx()),
b.repr(self.tcx()));
// If we have a parameter of type `&M T_a` and the value
// provided is `expr`, we will be adding an implicit borrow,
// meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
// to type check, we will construct the type that `&M*expr` would
// yield.
match a.sty {
ty::ty_rptr(_, mt_a) => {
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
}
_ => return self.subtype(a, b)
}
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let r_borrow = self.tcx().mk_region(r_borrow);
let autoref = Some(ty::AutoPtr(r_borrow, mutbl_b));
let lvalue_pref = match mutbl_b {
ast::MutMutable => PreferMutLvalue,
ast::MutImmutable => NoPreference
};
let mut first_error = None;
let (_, autoderefs, success) = autoderef(self.fcx,
expr_a.span,
a,
Some(expr_a),
UnresolvedTypeAction::Ignore,
lvalue_pref,
|inner_ty, autoderef| {
if autoderef == 0 {
// Don't let this pass, otherwise it would cause
// &T to autoref to &&T.
return None;
}
let ty = ty::mk_rptr(self.tcx(), r_borrow,
mt {ty: inner_ty, mutbl: mutbl_b});
if let Err(err) = self.subtype(ty, b) {
if first_error.is_none() {
first_error = Some(err);
}
None
} else {
Some(())
}
});
match success {
Some(_) => {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: autoderefs,
autoref: autoref,
unsize: None
})))
}
None => {
// Return original error as if overloaded deref was never
// attempted, to avoid irrelevant/confusing error messages.
Err(first_error.expect("coerce_borrowed_pointer failed with no error?"))
}
}
}
// &[T,..n] or &mut [T,..n] -> &[T]
// or &mut [T,..n] -> &mut [T]
// or &Concrete -> &Trait, etc.
/// Attempts an unsizing coercion from `source` to `target`, e.g.
/// `&[T; n] -> &[T]` or `&Concrete -> &Trait`, by resolving a
/// `Source: CoerceUnsized<Target>` obligation (and any nested `Unsize`
/// obligations) with a small custom fulfillment loop.
///
/// On success, returns an `AdjustDerefRef` adjustment whose `unsize`
/// field records the target type; predicates that could not be resolved
/// here are stashed in `self.unsizing_obligations` (presumably for the
/// enclosing coercion machinery to register later — see the caller).
fn coerce_unsized(&self,
source: Ty<'tcx>,
target: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("coerce_unsized(source={}, target={})",
source.repr(self.tcx()),
target.repr(self.tcx()));
// Both lang items must be present for unsizing to be expressible at all.
let traits = (self.tcx().lang_items.unsize_trait(),
self.tcx().lang_items.coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
debug!("Missing Unsize or CoerceUnsized traits");
return Err(ty::terr_mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
// a DST unless we have to. This currently comes out in the wash since
// we can't unify [T] with U. But to properly support DST, we need to allow
// that, at which point we will need extra checks on the target here.
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let (source, reborrow) = match (&source.sty, &target.sty) {
(&ty::ty_rptr(_, mt_a), &ty::ty_rptr(_, mt_b)) => {
// `&mut T -> &U` is fine; `&T -> &mut U` is rejected here.
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let region = self.tcx().mk_region(r_borrow);
(mt_a.ty, Some(ty::AutoPtr(region, mt_b.mutbl)))
}
(&ty::ty_rptr(_, mt_a), &ty::ty_ptr(mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
(mt_a.ty, Some(ty::AutoUnsafe(mt_b.mutbl)))
}
_ => (source, None)
};
// Apply the reborrow (if any) to `source` before trait selection, so we
// select `&'r T: CoerceUnsized<Target>` with the fresh region/ptr shape.
let source = ty::adjust_ty_for_autoref(self.tcx(), source, reborrow);
let mut selcx = traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
let mut leftover_predicates = vec![];
// Create an obligation for `Source: CoerceUnsized<Target>`.
let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id);
queue.push_back(predicate_for_trait_def(self.tcx(),
cause,
coerce_unsized_did,
0,
source,
vec![target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// inference might unify those two inner type variables later.
let traits = [coerce_unsized_did, unsize_did];
while let Some(obligation) = queue.pop_front() {
debug!("coerce_unsized resolve step: {}", obligation.repr(self.tcx()));
// Only CoerceUnsized/Unsize predicates are resolved eagerly here;
// everything else is deferred into `leftover_predicates`.
let trait_ref = match obligation.predicate {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
tr.clone()
}
_ => {
leftover_predicates.push(obligation);
continue;
}
};
match selcx.select(&obligation.with(trait_ref)) {
// Uncertain or unimplemented.
Ok(None) | Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
return Err(ty::terr_mismatch);
}
// Object safety violations or miscellaneous.
Err(err) => {
report_selection_error(self.fcx.infcx(), &obligation, &err);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
// Recurse into the impl's nested obligations.
vtable.map_move_nested(|o| queue.push_back(o));
}
}
}
// Hand the unresolved predicates to whoever drains `unsizing_obligations`;
// the assert guarantees no earlier unsizing left anything behind.
let mut obligations = self.unsizing_obligations.borrow_mut();
assert!(obligations.is_empty());
*obligations = leftover_predicates;
// One deref/ref pair if we reborrowed above, plus the unsize step itself.
let adjustment = AutoDerefRef {
autoderefs: if reborrow.is_some() { 1 } else { 0 },
autoref: reborrow,
unsize: Some(target)
};
debug!("Success, coerced with {}", adjustment.repr(self.tcx()));
Ok(Some(AdjustDerefRef(adjustment)))
}
/// Attempts to coerce the function pointer type `a` to `b`. The only
/// non-trivial case handled here is coercing a safe `fn` pointer to an
/// `unsafe fn` pointer of the same signature (`AdjustUnsafeFnPointer`);
/// all other cases fall back to plain subtyping.
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx>
{
/*!
* Attempts to coerce the fn pointer type `a` into the expected
* type `b`; permits dropping the `unsafe` qualifier, i.e.
* coercing a safe fn pointer to an `unsafe fn` pointer.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_pointer(a={}, b={})",
a.repr(self.tcx()), b.repr(self.tcx()));
if let ty::ty_bare_fn(None, fn_ty_b) = b.sty {
match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
(ast::Unsafety::Normal, ast::Unsafety::Unsafe) => {
// Re-check subtyping against the unsafe-ified version of `a`.
let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
try!(self.subtype(unsafe_a, b));
return Ok(Some(ty::AdjustUnsafeFnPointer));
}
_ => {}
}
}
self.subtype(a, b)
})
}
/// Coerces a function item type `a` into the expected type `b`.
/// When `b` is a plain (unnamed) function pointer, the item is
/// "reified" into a pointer of the same signature
/// (`AdjustReifyFnPointer`); otherwise ordinary subtyping applies.
fn coerce_from_fn_item(&self,
                       a: Ty<'tcx>,
                       fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
                       b: Ty<'tcx>)
                       -> CoerceResult<'tcx> {
    self.unpack_actual_value(b, |resolved_b| {
        debug!("coerce_from_fn_item(a={}, b={})",
               a.repr(self.tcx()), resolved_b.repr(self.tcx()));
        if let ty::ty_bare_fn(None, _) = resolved_b.sty {
            // Build the anonymous fn-pointer version of the item and
            // require it to be a subtype of the expectation.
            let reified = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
            try!(self.subtype(reified, resolved_b));
            Ok(Some(ty::AdjustReifyFnPointer))
        } else {
            self.subtype(a, resolved_b)
        }
    })
}
fn | (&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_unsafe_ptr(a={}, b={})",
a.repr(self.tcx()),
b.repr(self.tcx()));
let mt_a = match a.sty {
ty::ty_rptr(_, mt) | ty::ty_ptr(mt) => mt,
_ => {
return self.subtype(a, b);
}
};
// Check that the types which they point at are compatible.
let a_unsafe = ty::mk_ptr(self.tcx(), ty::mt{ mutbl: mutbl_b, ty: mt_a.ty });
try!(self.subtype(a_unsafe, b));
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
// Although references and unsafe ptrs have the same
// representation, we still register an AutoDerefRef so that
// regionck knows that the region for `a` must be valid here.
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoUnsafe(mutbl_b)),
unsize: None
})))
}
}
pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
| coerce_unsafe_ptr | identifier_name |
coercion.rs | //! of mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) {... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
use check::{autoderef, FnCtxt, NoPreference, PreferMutLvalue, UnresolvedTypeAction};
use middle::infer::{self, Coercion};
use middle::traits::{self, ObligationCause};
use middle::traits::{predicate_for_trait_def, report_selection_error};
use middle::ty::{AutoDerefRef, AdjustDerefRef};
use middle::ty::{self, mt, Ty};
use middle::ty_relate::RelateResult;
use util::common::indent;
use util::ppaux::Repr;
use std::cell::RefCell;
use std::collections::VecDeque;
use syntax::ast;
/// State for computing a single coercion: the enclosing function-check
/// context, the origin of the type expectation (used for spans), and a
/// side-channel of trait obligations produced while unsizing.
struct Coerce<'a, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'tcx>,
// Where the expected type came from; its span seeds region vars and causes.
origin: infer::TypeOrigin,
// Predicates left over by `coerce_unsized`; presumably drained and
// registered by the surrounding type-check machinery — not visible here.
unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
}
// `Ok(None)` means "no adjustment needed"; `Ok(Some(adj))` carries the
// auto-adjustment to record for the expression; `Err` is a type mismatch.
type CoerceResult<'tcx> = RelateResult<'tcx, Option<ty::AutoAdjustment<'tcx>>>;
impl<'f, 'tcx> Coerce<'f, 'tcx> {
/// Convenience accessor for the type context of the enclosing `FnCtxt`.
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.fcx.tcx()
}
/// Requires `a <: b` via the inference context; a plain subtype check
/// never needs an adjustment, hence `Ok(None)`.
fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b));
Ok(None) // No coercion required.
}
/// Runs `f` on the shallow-resolved form of `a`, so callers can match
/// on the head structure of a type without resolving nested variables.
fn unpack_actual_value<T, F>(&self, a: Ty<'tcx>, f: F) -> T where
F: FnOnce(Ty<'tcx>) -> T,
{
f(self.fcx.infcx().shallow_resolve(a))
}
fn coerce(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("Coerce.tys({} => {})", | self.coerce_unsized(a, b)
});
if unsize.is_ok() {
return unsize;
}
// Examine the supertype and consider auto-borrowing.
//
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
ty::ty_ptr(mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_unsafe_ptr(a, b, mt_b.mutbl)
});
}
ty::ty_rptr(_, mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl)
});
}
_ => {}
}
self.unpack_actual_value(a, |a| {
match a.sty {
ty::ty_bare_fn(Some(_), a_f) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
self.coerce_from_fn_item(a, a_f, b)
}
ty::ty_bare_fn(None, a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
_ => {
// Otherwise, just use subtyping rules.
self.subtype(a, b)
}
}
})
}
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
///
/// On success, yields an `AdjustDerefRef` recording how many derefs were
/// taken plus the final `AutoPtr` re-borrow with a fresh region variable.
fn coerce_borrowed_pointer(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_borrowed_pointer(a={}, b={})",
a.repr(self.tcx()),
b.repr(self.tcx()));
// If we have a parameter of type `&M T_a` and the value
// provided is `expr`, we will be adding an implicit borrow,
// meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
// to type check, we will construct the type that `&M*expr` would
// yield.
match a.sty {
ty::ty_rptr(_, mt_a) => {
// Only the mutability is checked here; the pointee is matched below.
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
}
_ => return self.subtype(a, b)
}
// Fresh region variable for the implicit re-borrow.
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let r_borrow = self.tcx().mk_region(r_borrow);
let autoref = Some(ty::AutoPtr(r_borrow, mutbl_b));
// A `&mut` target needs mutable lvalues along the deref chain.
let lvalue_pref = match mutbl_b {
ast::MutMutable => PreferMutLvalue,
ast::MutImmutable => NoPreference
};
// Capture the first subtype failure so it can be reported verbatim
// if no deref level succeeds.
let mut first_error = None;
// Walk the autoderef chain of `a`; the closure returning Some(())
// signals an acceptable level (assumed to stop the walk — see
// `autoderef`'s contract; TODO confirm against its definition).
let (_, autoderefs, success) = autoderef(self.fcx,
expr_a.span,
a,
Some(expr_a),
UnresolvedTypeAction::Ignore,
lvalue_pref,
|inner_ty, autoderef| {
if autoderef == 0 {
// Don't let this pass, otherwise it would cause
// &T to autoref to &&T.
return None;
}
let ty = ty::mk_rptr(self.tcx(), r_borrow,
mt {ty: inner_ty, mutbl: mutbl_b});
if let Err(err) = self.subtype(ty, b) {
if first_error.is_none() {
first_error = Some(err);
}
None
} else {
Some(())
}
});
match success {
Some(_) => {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: autoderefs,
autoref: autoref,
unsize: None
})))
}
None => {
// Return original error as if overloaded deref was never
// attempted, to avoid irrelevant/confusing error messages.
Err(first_error.expect("coerce_borrowed_pointer failed with no error?"))
}
}
}
// &[T,..n] or &mut [T,..n] -> &[T]
// or &mut [T,..n] -> &mut [T]
// or &Concrete -> &Trait, etc.
/// Attempts an unsizing coercion from `source` to `target`, e.g.
/// `&[T; n] -> &[T]` or `&Concrete -> &Trait`, by resolving a
/// `Source: CoerceUnsized<Target>` obligation (and any nested `Unsize`
/// obligations) with a small custom fulfillment loop.
///
/// On success, returns an `AdjustDerefRef` adjustment whose `unsize`
/// field records the target type; predicates that could not be resolved
/// here are stashed in `self.unsizing_obligations` (presumably for the
/// enclosing coercion machinery to register later — see the caller).
fn coerce_unsized(&self,
source: Ty<'tcx>,
target: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("coerce_unsized(source={}, target={})",
source.repr(self.tcx()),
target.repr(self.tcx()));
// Both lang items must be present for unsizing to be expressible at all.
let traits = (self.tcx().lang_items.unsize_trait(),
self.tcx().lang_items.coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
debug!("Missing Unsize or CoerceUnsized traits");
return Err(ty::terr_mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
// a DST unless we have to. This currently comes out in the wash since
// we can't unify [T] with U. But to properly support DST, we need to allow
// that, at which point we will need extra checks on the target here.
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let (source, reborrow) = match (&source.sty, &target.sty) {
(&ty::ty_rptr(_, mt_a), &ty::ty_rptr(_, mt_b)) => {
// `&mut T -> &U` is fine; `&T -> &mut U` is rejected here.
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let region = self.tcx().mk_region(r_borrow);
(mt_a.ty, Some(ty::AutoPtr(region, mt_b.mutbl)))
}
(&ty::ty_rptr(_, mt_a), &ty::ty_ptr(mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
(mt_a.ty, Some(ty::AutoUnsafe(mt_b.mutbl)))
}
_ => (source, None)
};
// Apply the reborrow (if any) to `source` before trait selection, so we
// select `&'r T: CoerceUnsized<Target>` with the fresh region/ptr shape.
let source = ty::adjust_ty_for_autoref(self.tcx(), source, reborrow);
let mut selcx = traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
let mut leftover_predicates = vec![];
// Create an obligation for `Source: CoerceUnsized<Target>`.
let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id);
queue.push_back(predicate_for_trait_def(self.tcx(),
cause,
coerce_unsized_did,
0,
source,
vec![target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// inference might unify those two inner type variables later.
let traits = [coerce_unsized_did, unsize_did];
while let Some(obligation) = queue.pop_front() {
debug!("coerce_unsized resolve step: {}", obligation.repr(self.tcx()));
// Only CoerceUnsized/Unsize predicates are resolved eagerly here;
// everything else is deferred into `leftover_predicates`.
let trait_ref = match obligation.predicate {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
tr.clone()
}
_ => {
leftover_predicates.push(obligation);
continue;
}
};
match selcx.select(&obligation.with(trait_ref)) {
// Uncertain or unimplemented.
Ok(None) | Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
return Err(ty::terr_mismatch);
}
// Object safety violations or miscellaneous.
Err(err) => {
report_selection_error(self.fcx.infcx(), &obligation, &err);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
// Recurse into the impl's nested obligations.
vtable.map_move_nested(|o| queue.push_back(o));
}
}
}
// Hand the unresolved predicates to whoever drains `unsizing_obligations`;
// the assert guarantees no earlier unsizing left anything behind.
let mut obligations = self.unsizing_obligations.borrow_mut();
assert!(obligations.is_empty());
*obligations = leftover_predicates;
// One deref/ref pair if we reborrowed above, plus the unsize step itself.
let adjustment = AutoDerefRef {
autoderefs: if reborrow.is_some() { 1 } else { 0 },
autoref: reborrow,
unsize: Some(target)
};
debug!("Success, coerced with {}", adjustment.repr(self.tcx()));
Ok(Some(AdjustDerefRef(adjustment)))
}
/// Attempts to coerce the function pointer type `a` to `b`. The only
/// non-trivial case handled here is coercing a safe `fn` pointer to an
/// `unsafe fn` pointer of the same signature (`AdjustUnsafeFnPointer`);
/// all other cases fall back to plain subtyping.
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx>
{
/*!
* Attempts to coerce the fn pointer type `a` into the expected
* type `b`; permits dropping the `unsafe` qualifier, i.e.
* coercing a safe fn pointer to an `unsafe fn` pointer.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_pointer(a={}, b={})",
a.repr(self.tcx()), b.repr(self.tcx()));
if let ty::ty_bare_fn(None, fn_ty_b) = b.sty {
match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
(ast::Unsafety::Normal, ast::Unsafety::Unsafe) => {
// Re-check subtyping against the unsafe-ified version of `a`.
let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
try!(self.subtype(unsafe_a, b));
return Ok(Some(ty::AdjustUnsafeFnPointer));
}
_ => {}
}
}
self.subtype(a, b)
})
}
/// Coerces a function item type `a` into the expected type `b`.
/// When `b` is a plain (unnamed) function pointer, the item is
/// "reified" into a pointer of the same signature
/// (`AdjustReifyFnPointer`); otherwise ordinary subtyping applies.
fn coerce_from_fn_item(&self,
                       a: Ty<'tcx>,
                       fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
                       b: Ty<'tcx>)
                       -> CoerceResult<'tcx> {
    self.unpack_actual_value(b, |resolved_b| {
        debug!("coerce_from_fn_item(a={}, b={})",
               a.repr(self.tcx()), resolved_b.repr(self.tcx()));
        if let ty::ty_bare_fn(None, _) = resolved_b.sty {
            // Build the anonymous fn-pointer version of the item and
            // require it to be a subtype of the expectation.
            let reified = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
            try!(self.subtype(reified, resolved_b));
            Ok(Some(ty::AdjustReifyFnPointer))
        } else {
            self.subtype(a, resolved_b)
        }
    })
}
/// Coerces a reference or raw pointer `a` to the raw-pointer type `b`
/// (e.g. `&T -> *const T`, `&mut T -> *mut T`). Non-pointer sources
/// fall back to plain subtyping.
fn coerce_unsafe_ptr(&self,
                     a: Ty<'tcx>,
                     b: Ty<'tcx>,
                     mutbl_b: ast::Mutability)
                     -> CoerceResult<'tcx> {
    debug!("coerce_unsafe_ptr(a={}, b={})",
           a.repr(self.tcx()),
           b.repr(self.tcx()));
    // Extract the pointee of `a`; anything that is neither a reference
    // nor a raw pointer is handled by ordinary subtyping.
    let referent = match a.sty {
        ty::ty_rptr(_, mt) => mt,
        ty::ty_ptr(mt) => mt,
        _ => return self.subtype(a, b),
    };
    // Check that the pointee types are compatible by comparing the
    // raw-pointer version of `a` against `b`, then check mutability.
    let a_as_unsafe = ty::mk_ptr(self.tcx(), ty::mt { mutbl: mutbl_b, ty: referent.ty });
    try!(self.subtype(a_as_unsafe, b));
    try!(coerce_mutbls(referent.mutbl, mutbl_b));
    // References and raw pointers share a representation, but recording
    // an AutoDerefRef lets regionck validate the region of `a` here.
    Ok(Some(AdjustDerefRef(AutoDerefRef {
        autoderefs: 1,
        autoref: Some(ty::AutoUnsafe(mutbl_b)),
        unsize: None
    })))
}
}
pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> | a.repr(self.tcx()),
b.repr(self.tcx()));
// Consider coercing the subtype to a DST
let unsize = self.unpack_actual_value(a, |a| { | random_line_split |
coercion.rs | mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) {... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
use check::{autoderef, FnCtxt, NoPreference, PreferMutLvalue, UnresolvedTypeAction};
use middle::infer::{self, Coercion};
use middle::traits::{self, ObligationCause};
use middle::traits::{predicate_for_trait_def, report_selection_error};
use middle::ty::{AutoDerefRef, AdjustDerefRef};
use middle::ty::{self, mt, Ty};
use middle::ty_relate::RelateResult;
use util::common::indent;
use util::ppaux::Repr;
use std::cell::RefCell;
use std::collections::VecDeque;
use syntax::ast;
/// State for computing a single coercion: the enclosing function-check
/// context, the origin of the type expectation (used for spans), and a
/// side-channel of trait obligations produced while unsizing.
struct Coerce<'a, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'tcx>,
// Where the expected type came from; its span seeds region vars and causes.
origin: infer::TypeOrigin,
// Predicates left over by `coerce_unsized`; presumably drained and
// registered by the surrounding type-check machinery — not visible here.
unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
}
// `Ok(None)` means "no adjustment needed"; `Ok(Some(adj))` carries the
// auto-adjustment to record for the expression; `Err` is a type mismatch.
type CoerceResult<'tcx> = RelateResult<'tcx, Option<ty::AutoAdjustment<'tcx>>>;
impl<'f, 'tcx> Coerce<'f, 'tcx> {
/// Convenience accessor for the type context of the enclosing `FnCtxt`.
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.fcx.tcx()
}
/// Requires `a <: b` via the inference context; a plain subtype check
/// never needs an adjustment, hence `Ok(None)`.
fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b))
;
Ok(None) // No coercion required.
}
/// Runs `f` on the shallow-resolved form of `a`, so callers can match
/// on the head structure of a type without resolving nested variables.
fn unpack_actual_value<T, F>(&self, a: Ty<'tcx>, f: F) -> T where
F: FnOnce(Ty<'tcx>) -> T,
{
f(self.fcx.infcx().shallow_resolve(a))
}
/// Top-level coercion dispatch: tries to coerce expression `expr_a` of
/// actual type `a` to the expected type `b`. Order of attempts:
/// 1. an unsizing coercion (`coerce_unsized`);
/// 2. if `b` is a raw pointer or reference, pointer coercions;
/// 3. if `a` is a fn item or fn pointer, the fn coercions;
/// 4. otherwise plain subtyping.
fn coerce(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("Coerce.tys({} => {})",
a.repr(self.tcx()),
b.repr(self.tcx()));
// Consider coercing the subtype to a DST
let unsize = self.unpack_actual_value(a, |a| {
self.coerce_unsized(a, b)
});
if unsize.is_ok() {
return unsize;
}
// Examine the supertype and consider auto-borrowing.
//
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
ty::ty_ptr(mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_unsafe_ptr(a, b, mt_b.mutbl)
});
}
ty::ty_rptr(_, mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl)
});
}
_ => {}
}
// Finally, dispatch on the shape of the (resolved) actual type.
self.unpack_actual_value(a, |a| {
match a.sty {
ty::ty_bare_fn(Some(_), a_f) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
self.coerce_from_fn_item(a, a_f, b)
}
ty::ty_bare_fn(None, a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
_ => {
// Otherwise, just use subtyping rules.
self.subtype(a, b)
}
}
})
}
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
///
/// On success, yields an `AdjustDerefRef` recording how many derefs were
/// taken plus the final `AutoPtr` re-borrow with a fresh region variable.
fn coerce_borrowed_pointer(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_borrowed_pointer(a={}, b={})",
a.repr(self.tcx()),
b.repr(self.tcx()));
// If we have a parameter of type `&M T_a` and the value
// provided is `expr`, we will be adding an implicit borrow,
// meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
// to type check, we will construct the type that `&M*expr` would
// yield.
match a.sty {
ty::ty_rptr(_, mt_a) => {
// Only the mutability is checked here; the pointee is matched below.
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
}
_ => return self.subtype(a, b)
}
// Fresh region variable for the implicit re-borrow.
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let r_borrow = self.tcx().mk_region(r_borrow);
let autoref = Some(ty::AutoPtr(r_borrow, mutbl_b));
// A `&mut` target needs mutable lvalues along the deref chain.
let lvalue_pref = match mutbl_b {
ast::MutMutable => PreferMutLvalue,
ast::MutImmutable => NoPreference
};
// Capture the first subtype failure so it can be reported verbatim
// if no deref level succeeds.
let mut first_error = None;
// Walk the autoderef chain of `a`; the closure returning Some(())
// signals an acceptable level (assumed to stop the walk — see
// `autoderef`'s contract; TODO confirm against its definition).
let (_, autoderefs, success) = autoderef(self.fcx,
expr_a.span,
a,
Some(expr_a),
UnresolvedTypeAction::Ignore,
lvalue_pref,
|inner_ty, autoderef| {
if autoderef == 0 {
// Don't let this pass, otherwise it would cause
// &T to autoref to &&T.
return None;
}
let ty = ty::mk_rptr(self.tcx(), r_borrow,
mt {ty: inner_ty, mutbl: mutbl_b});
if let Err(err) = self.subtype(ty, b) {
if first_error.is_none() {
first_error = Some(err);
}
None
} else {
Some(())
}
});
match success {
Some(_) => {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: autoderefs,
autoref: autoref,
unsize: None
})))
}
None => {
// Return original error as if overloaded deref was never
// attempted, to avoid irrelevant/confusing error messages.
Err(first_error.expect("coerce_borrowed_pointer failed with no error?"))
}
}
}
// &[T,..n] or &mut [T,..n] -> &[T]
// or &mut [T,..n] -> &mut [T]
// or &Concrete -> &Trait, etc.
fn coerce_unsized(&self,
source: Ty<'tcx>,
target: Ty<'tcx>)
-> CoerceResult<'tcx> | let (source, reborrow) = match (&source.sty, &target.sty) {
(&ty::ty_rptr(_, mt_a), &ty::ty_rptr(_, mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let region = self.tcx().mk_region(r_borrow);
(mt_a.ty, Some(ty::AutoPtr(region, mt_b.mutbl)))
}
(&ty::ty_rptr(_, mt_a), &ty::ty_ptr(mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
(mt_a.ty, Some(ty::AutoUnsafe(mt_b.mutbl)))
}
_ => (source, None)
};
let source = ty::adjust_ty_for_autoref(self.tcx(), source, reborrow);
let mut selcx = traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
let mut leftover_predicates = vec![];
// Create an obligation for `Source: CoerceUnsized<Target>`.
let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id);
queue.push_back(predicate_for_trait_def(self.tcx(),
cause,
coerce_unsized_did,
0,
source,
vec![target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// inference might unify those two inner type variables later.
let traits = [coerce_unsized_did, unsize_did];
while let Some(obligation) = queue.pop_front() {
debug!("coerce_unsized resolve step: {}", obligation.repr(self.tcx()));
let trait_ref = match obligation.predicate {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
tr.clone()
}
_ => {
leftover_predicates.push(obligation);
continue;
}
};
match selcx.select(&obligation.with(trait_ref)) {
// Uncertain or unimplemented.
Ok(None) | Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
return Err(ty::terr_mismatch);
}
// Object safety violations or miscellaneous.
Err(err) => {
report_selection_error(self.fcx.infcx(), &obligation, &err);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
vtable.map_move_nested(|o| queue.push_back(o));
}
}
}
let mut obligations = self.unsizing_obligations.borrow_mut();
assert!(obligations.is_empty());
*obligations = leftover_predicates;
let adjustment = AutoDerefRef {
autoderefs: if reborrow.is_some() { 1 } else { 0 },
autoref: reborrow,
unsize: Some(target)
};
debug!("Success, coerced with {}", adjustment.repr(self.tcx()));
Ok(Some(AdjustDerefRef(adjustment)))
}
/// Attempts to coerce the function pointer type `a` to `b`. The only
/// non-trivial case handled here is coercing a safe `fn` pointer to an
/// `unsafe fn` pointer of the same signature (`AdjustUnsafeFnPointer`);
/// all other cases fall back to plain subtyping.
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx>
{
/*!
* Attempts to coerce the fn pointer type `a` into the expected
* type `b`; permits dropping the `unsafe` qualifier, i.e.
* coercing a safe fn pointer to an `unsafe fn` pointer.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_pointer(a={}, b={})",
a.repr(self.tcx()), b.repr(self.tcx()));
if let ty::ty_bare_fn(None, fn_ty_b) = b.sty {
match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
(ast::Unsafety::Normal, ast::Unsafety::Unsafe) => {
// Re-check subtyping against the unsafe-ified version of `a`.
let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
try!(self.subtype(unsafe_a, b));
return Ok(Some(ty::AdjustUnsafeFnPointer));
}
_ => {}
}
}
self.subtype(a, b)
})
}
/// Coerces a function item type `a` into the expected type `b`.
/// When `b` is a plain (unnamed) function pointer, the item is
/// "reified" into a pointer of the same signature
/// (`AdjustReifyFnPointer`); otherwise ordinary subtyping applies.
fn coerce_from_fn_item(&self,
                       a: Ty<'tcx>,
                       fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
                       b: Ty<'tcx>)
                       -> CoerceResult<'tcx> {
    self.unpack_actual_value(b, |resolved_b| {
        debug!("coerce_from_fn_item(a={}, b={})",
               a.repr(self.tcx()), resolved_b.repr(self.tcx()));
        if let ty::ty_bare_fn(None, _) = resolved_b.sty {
            // Build the anonymous fn-pointer version of the item and
            // require it to be a subtype of the expectation.
            let reified = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
            try!(self.subtype(reified, resolved_b));
            Ok(Some(ty::AdjustReifyFnPointer))
        } else {
            self.subtype(a, resolved_b)
        }
    })
}
/// Coerces a reference or raw pointer `a` to the raw-pointer type `b`
/// (e.g. `&T -> *const T`, `&mut T -> *mut T`). Non-pointer sources
/// fall back to plain subtyping.
fn coerce_unsafe_ptr(&self,
                     a: Ty<'tcx>,
                     b: Ty<'tcx>,
                     mutbl_b: ast::Mutability)
                     -> CoerceResult<'tcx> {
    debug!("coerce_unsafe_ptr(a={}, b={})",
           a.repr(self.tcx()),
           b.repr(self.tcx()));
    // Extract the pointee of `a`; anything that is neither a reference
    // nor a raw pointer is handled by ordinary subtyping.
    let referent = match a.sty {
        ty::ty_rptr(_, mt) => mt,
        ty::ty_ptr(mt) => mt,
        _ => return self.subtype(a, b),
    };
    // Check that the pointee types are compatible by comparing the
    // raw-pointer version of `a` against `b`, then check mutability.
    let a_as_unsafe = ty::mk_ptr(self.tcx(), ty::mt { mutbl: mutbl_b, ty: referent.ty });
    try!(self.subtype(a_as_unsafe, b));
    try!(coerce_mutbls(referent.mutbl, mutbl_b));
    // References and raw pointers share a representation, but recording
    // an AutoDerefRef lets regionck validate the region of `a` here.
    Ok(Some(AdjustDerefRef(AutoDerefRef {
        autoderefs: 1,
        autoref: Some(ty::AutoUnsafe(mutbl_b)),
        unsize: None
    })))
}
}
pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
| {
debug!("coerce_unsized(source={}, target={})",
source.repr(self.tcx()),
target.repr(self.tcx()));
let traits = (self.tcx().lang_items.unsize_trait(),
self.tcx().lang_items.coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
debug!("Missing Unsize or CoerceUnsized traits");
return Err(ty::terr_mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
// a DST unless we have to. This currently comes out in the wash since
// we can't unify [T] with U. But to properly support DST, we need to allow
// that, at which point we will need extra checks on the target here.
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`. | identifier_body |
resolve.rs | use std::collections::{HashMap, HashSet};
use core::{Package, PackageId, SourceId};
use core::registry::PackageRegistry;
use core::resolver::{self, Resolve, Method};
use ops;
use util::CargoResult;
/// Resolve all dependencies for the specified `package` using the previous
/// lockfile as a guide if present.
///
/// This function will also write the result of resolution as a new
/// lockfile (only when the package comes from a local path source —
/// registry/git sources are not written back).
pub fn resolve_pkg(registry: &mut PackageRegistry, package: &Package)
-> CargoResult<Resolve> {
// Previous lockfile (if any) steers resolution toward the locked versions.
let prev = try!(ops::load_pkg_lockfile(package));
let resolve = try!(resolve_with_previous(registry, package,
Method::Everything,
prev.as_ref(), None));
// Only path (local filesystem) packages get their lockfile rewritten.
if package.package_id().source_id().is_path() {
try!(ops::write_pkg_lockfile(package, &resolve));
}
Ok(resolve)
}
/// Resolve all dependencies for a package using an optional previous instance
/// of resolve to guide the resolution process.
///
/// This also takes an optional hash set, `to_avoid`, which is a list of package
/// ids that should be avoided when consulting the previous instance of resolve
/// (often used in pairings with updates).
///
/// The previous resolve normally comes from a lockfile. This function does not
/// read or write lockfiles from the filesystem.
pub fn resolve_with_previous<'a>(registry: &mut PackageRegistry,
package: &Package,
method: Method,
previous: Option<&'a Resolve>,
to_avoid: Option<&HashSet<&'a PackageId>>)
-> CargoResult<Resolve> {
try!(registry.add_sources(&[package.package_id().source_id()
.clone()]));
// Here we place an artificial limitation that all non-registry sources
// cannot be locked at more than one revision. This means that if a git
// repository provides more than one package, they must all be updated in
// step when any of them are updated.
//
// TODO: This seems like a hokey reason to single out the registry as being
// different
let mut to_avoid_sources = HashSet::new();
match to_avoid {
Some(set) => {
for package_id in set.iter() {
let source = package_id.source_id();
if!source.is_registry() {
to_avoid_sources.insert(source);
}
}
}
None => {}
}
let summary = package.summary().clone();
let summary = match previous {
Some(r) => {
// In the case where a previous instance of resolve is available, we
// want to lock as many packages as possible to the previous version
// without disturbing the graph structure. To this end we perform
// two actions here:
//
// 1. We inform the package registry of all locked packages. This
// involves informing it of both the locked package's id as well
// as the versions of all locked dependencies. The registry will
// then takes this information into account when it is queried.
//
// 2. The specified package's summary will have its dependencies
// modified to their precise variants. This will instruct the
// first step of the resolution process to not query for ranges
// but rather for precise dependency versions.
//
// This process must handle altered dependencies, however, as
// it's possible for a manifest to change over time to have
// dependencies added, removed, or modified to different version
// ranges. To deal with this, we only actually lock a dependency
// to the previously resolved version if the dependency listed
// still matches the locked version.
for node in r.iter().filter(|p| keep(p, to_avoid, &to_avoid_sources)) {
let deps = r.deps(node).into_iter().flat_map(|i| i)
.filter(|p| keep(p, to_avoid, &to_avoid_sources))
.map(|p| p.clone()).collect();
registry.register_lock(node.clone(), deps);
}
let map = r.deps(r.root()).into_iter().flat_map(|i| i).filter(|p| {
keep(p, to_avoid, &to_avoid_sources)
}).map(|d| {
(d.name(), d)
}).collect::<HashMap<_, _>>();
summary.map_dependencies(|d| {
match map.get(d.name()) {
Some(&lock) if d.matches_id(lock) => d.lock_to(lock),
_ => d,
}
})
}
None => summary,
};
| let mut resolved = try!(resolver::resolve(&summary, &method, registry));
match previous {
Some(r) => resolved.copy_metadata(r),
None => {}
}
return Ok(resolved);
fn keep<'a>(p: &&'a PackageId,
to_avoid_packages: Option<&HashSet<&'a PackageId>>,
to_avoid_sources: &HashSet<&'a SourceId>)
-> bool {
!to_avoid_sources.contains(&p.source_id()) && match to_avoid_packages {
Some(set) =>!set.contains(p),
None => true,
}
}
} | random_line_split |
|
resolve.rs | use std::collections::{HashMap, HashSet};
use core::{Package, PackageId, SourceId};
use core::registry::PackageRegistry;
use core::resolver::{self, Resolve, Method};
use ops;
use util::CargoResult;
/// Resolve all dependencies for the specified `package` using the previous
/// lockfile as a guide if present.
///
/// This function will also write the result of resolution as a new
/// lockfile (only when the package comes from a local path source —
/// registry/git sources are not written back).
pub fn resolve_pkg(registry: &mut PackageRegistry, package: &Package)
-> CargoResult<Resolve> {
// Previous lockfile (if any) steers resolution toward the locked versions.
let prev = try!(ops::load_pkg_lockfile(package));
let resolve = try!(resolve_with_previous(registry, package,
Method::Everything,
prev.as_ref(), None));
// Only path (local filesystem) packages get their lockfile rewritten.
if package.package_id().source_id().is_path() {
try!(ops::write_pkg_lockfile(package, &resolve));
}
Ok(resolve)
}
/// Resolve all dependencies for a package using an optional previous instance
/// of resolve to guide the resolution process.
///
/// This also takes an optional hash set, `to_avoid`, which is a list of package
/// ids that should be avoided when consulting the previous instance of resolve
/// (often used in pairings with updates).
///
/// The previous resolve normally comes from a lockfile. This function does not
/// read or write lockfiles from the filesystem.
pub fn resolve_with_previous<'a>(registry: &mut PackageRegistry,
package: &Package,
method: Method,
previous: Option<&'a Resolve>,
to_avoid: Option<&HashSet<&'a PackageId>>)
-> CargoResult<Resolve> | }
}
None => {}
}
let summary = package.summary().clone();
let summary = match previous {
Some(r) => {
// In the case where a previous instance of resolve is available, we
// want to lock as many packages as possible to the previous version
// without disturbing the graph structure. To this end we perform
// two actions here:
//
// 1. We inform the package registry of all locked packages. This
// involves informing it of both the locked package's id as well
// as the versions of all locked dependencies. The registry will
// then takes this information into account when it is queried.
//
// 2. The specified package's summary will have its dependencies
// modified to their precise variants. This will instruct the
// first step of the resolution process to not query for ranges
// but rather for precise dependency versions.
//
// This process must handle altered dependencies, however, as
// it's possible for a manifest to change over time to have
// dependencies added, removed, or modified to different version
// ranges. To deal with this, we only actually lock a dependency
// to the previously resolved version if the dependency listed
// still matches the locked version.
for node in r.iter().filter(|p| keep(p, to_avoid, &to_avoid_sources)) {
let deps = r.deps(node).into_iter().flat_map(|i| i)
.filter(|p| keep(p, to_avoid, &to_avoid_sources))
.map(|p| p.clone()).collect();
registry.register_lock(node.clone(), deps);
}
let map = r.deps(r.root()).into_iter().flat_map(|i| i).filter(|p| {
keep(p, to_avoid, &to_avoid_sources)
}).map(|d| {
(d.name(), d)
}).collect::<HashMap<_, _>>();
summary.map_dependencies(|d| {
match map.get(d.name()) {
Some(&lock) if d.matches_id(lock) => d.lock_to(lock),
_ => d,
}
})
}
None => summary,
};
let mut resolved = try!(resolver::resolve(&summary, &method, registry));
match previous {
Some(r) => resolved.copy_metadata(r),
None => {}
}
return Ok(resolved);
fn keep<'a>(p: &&'a PackageId,
to_avoid_packages: Option<&HashSet<&'a PackageId>>,
to_avoid_sources: &HashSet<&'a SourceId>)
-> bool {
!to_avoid_sources.contains(&p.source_id()) && match to_avoid_packages {
Some(set) =>!set.contains(p),
None => true,
}
}
}
| {
try!(registry.add_sources(&[package.package_id().source_id()
.clone()]));
// Here we place an artificial limitation that all non-registry sources
// cannot be locked at more than one revision. This means that if a git
// repository provides more than one package, they must all be updated in
// step when any of them are updated.
//
// TODO: This seems like a hokey reason to single out the registry as being
// different
let mut to_avoid_sources = HashSet::new();
match to_avoid {
Some(set) => {
for package_id in set.iter() {
let source = package_id.source_id();
if !source.is_registry() {
to_avoid_sources.insert(source);
} | identifier_body |
resolve.rs | use std::collections::{HashMap, HashSet};
use core::{Package, PackageId, SourceId};
use core::registry::PackageRegistry;
use core::resolver::{self, Resolve, Method};
use ops;
use util::CargoResult;
/// Resolve all dependencies for the specified `package` using the previous
/// lockfile as a guide if present.
///
/// This function will also write the result of resolution as a new
/// lockfile.
pub fn resolve_pkg(registry: &mut PackageRegistry, package: &Package)
-> CargoResult<Resolve> {
let prev = try!(ops::load_pkg_lockfile(package));
let resolve = try!(resolve_with_previous(registry, package,
Method::Everything,
prev.as_ref(), None));
if package.package_id().source_id().is_path() {
try!(ops::write_pkg_lockfile(package, &resolve));
}
Ok(resolve)
}
/// Resolve all dependencies for a package using an optional previous instance
/// of resolve to guide the resolution process.
///
/// This also takes an optional hash set, `to_avoid`, which is a list of package
/// ids that should be avoided when consulting the previous instance of resolve
/// (often used in pairings with updates).
///
/// The previous resolve normally comes from a lockfile. This function does not
/// read or write lockfiles from the filesystem.
pub fn | <'a>(registry: &mut PackageRegistry,
package: &Package,
method: Method,
previous: Option<&'a Resolve>,
to_avoid: Option<&HashSet<&'a PackageId>>)
-> CargoResult<Resolve> {
try!(registry.add_sources(&[package.package_id().source_id()
.clone()]));
// Here we place an artificial limitation that all non-registry sources
// cannot be locked at more than one revision. This means that if a git
// repository provides more than one package, they must all be updated in
// step when any of them are updated.
//
// TODO: This seems like a hokey reason to single out the registry as being
// different
let mut to_avoid_sources = HashSet::new();
match to_avoid {
Some(set) => {
for package_id in set.iter() {
let source = package_id.source_id();
if!source.is_registry() {
to_avoid_sources.insert(source);
}
}
}
None => {}
}
let summary = package.summary().clone();
let summary = match previous {
Some(r) => {
// In the case where a previous instance of resolve is available, we
// want to lock as many packages as possible to the previous version
// without disturbing the graph structure. To this end we perform
// two actions here:
//
// 1. We inform the package registry of all locked packages. This
// involves informing it of both the locked package's id as well
// as the versions of all locked dependencies. The registry will
// then takes this information into account when it is queried.
//
// 2. The specified package's summary will have its dependencies
// modified to their precise variants. This will instruct the
// first step of the resolution process to not query for ranges
// but rather for precise dependency versions.
//
// This process must handle altered dependencies, however, as
// it's possible for a manifest to change over time to have
// dependencies added, removed, or modified to different version
// ranges. To deal with this, we only actually lock a dependency
// to the previously resolved version if the dependency listed
// still matches the locked version.
for node in r.iter().filter(|p| keep(p, to_avoid, &to_avoid_sources)) {
let deps = r.deps(node).into_iter().flat_map(|i| i)
.filter(|p| keep(p, to_avoid, &to_avoid_sources))
.map(|p| p.clone()).collect();
registry.register_lock(node.clone(), deps);
}
let map = r.deps(r.root()).into_iter().flat_map(|i| i).filter(|p| {
keep(p, to_avoid, &to_avoid_sources)
}).map(|d| {
(d.name(), d)
}).collect::<HashMap<_, _>>();
summary.map_dependencies(|d| {
match map.get(d.name()) {
Some(&lock) if d.matches_id(lock) => d.lock_to(lock),
_ => d,
}
})
}
None => summary,
};
let mut resolved = try!(resolver::resolve(&summary, &method, registry));
match previous {
Some(r) => resolved.copy_metadata(r),
None => {}
}
return Ok(resolved);
fn keep<'a>(p: &&'a PackageId,
to_avoid_packages: Option<&HashSet<&'a PackageId>>,
to_avoid_sources: &HashSet<&'a SourceId>)
-> bool {
!to_avoid_sources.contains(&p.source_id()) && match to_avoid_packages {
Some(set) =>!set.contains(p),
None => true,
}
}
}
| resolve_with_previous | identifier_name |
assign-to-method.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct | {
meows : usize,
how_hungry : isize,
}
impl cat {
pub fn speak(&self) { self.meows += 1_usize; }
}
fn cat(in_x : usize, in_y : isize) -> cat {
cat {
meows: in_x,
how_hungry: in_y
}
}
fn main() {
let nyan : cat = cat(52_usize, 99);
nyan.speak = || println!("meow"); //~ ERROR attempted to take value of method
}
| cat | identifier_name |
assign-to-method.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| meows : usize,
how_hungry : isize,
}
impl cat {
pub fn speak(&self) { self.meows += 1_usize; }
}
fn cat(in_x : usize, in_y : isize) -> cat {
cat {
meows: in_x,
how_hungry: in_y
}
}
fn main() {
let nyan : cat = cat(52_usize, 99);
nyan.speak = || println!("meow"); //~ ERROR attempted to take value of method
} | struct cat { | random_line_split |
assign-to-method.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct cat {
meows : usize,
how_hungry : isize,
}
impl cat {
pub fn speak(&self) { self.meows += 1_usize; }
}
fn cat(in_x : usize, in_y : isize) -> cat {
cat {
meows: in_x,
how_hungry: in_y
}
}
fn main() | {
let nyan : cat = cat(52_usize, 99);
nyan.speak = || println!("meow"); //~ ERROR attempted to take value of method
} | identifier_body |
|
network_usage.rs | use futures::{Future, Stream};
use std::sync::Arc;
use tokio_timer::Timer;
use tokio_core::reactor::Handle;
use component::Component;
use error::{Error, Result};
use std::time::Duration;
use utils;
#[derive(Clone, PartialEq, Copy)]
pub enum Scale {
Binary,
Decimal,
}
impl Scale {
fn base(&self) -> u16 {
match *self {
Scale::Decimal => 1000, | Scale::Binary => 1024,
}
}
}
#[derive(Clone, Copy)]
pub enum Direction {
Incoming,
Outgoing,
}
pub struct NetworkUsage {
pub interface: String,
pub direction: Direction,
pub scale: Scale,
pub percision: u8,
pub refresh_frequency: Duration,
pub sample_duration: Duration,
}
impl Default for NetworkUsage {
fn default() -> NetworkUsage {
NetworkUsage {
interface: "eth0".to_string(),
direction: Direction::Incoming,
scale: Scale::Binary,
percision: 3,
refresh_frequency: Duration::from_secs(10),
sample_duration: Duration::from_secs(1),
}
}
}
fn get_prefix(scale: Scale, power: u8) -> &'static str {
match (scale, power) {
(Scale::Decimal, 0) | (Scale::Binary, 0) => "B/s",
(Scale::Decimal, 1) => "kb/s",
(Scale::Decimal, 2) => "Mb/s",
(Scale::Decimal, 3) => "Gb/s",
(Scale::Decimal, 4) => "Tb/s",
(Scale::Binary, 1) => "KiB/s",
(Scale::Binary, 2) => "MiB/s",
(Scale::Binary, 3) => "GiB/s",
(Scale::Binary, 4) => "TiB/s",
_ => "X/s",
}
}
fn get_number_scale(number: u64, scale: Scale) -> (f64, u8) {
let log = (number as f64).log(scale.base() as f64);
let wholes = log.floor();
let over = (scale.base() as f64).powf(log - wholes);
(over, wholes as u8)
}
fn get_bytes(interface: &str, dir: Direction) -> ::std::io::Result<Option<u64>> {
let dev = ::procinfo::net::dev::dev()?
.into_iter()
.find(|dev| dev.interface == interface);
let dev = match dev {
Some(dev) => dev,
None => return Ok(None),
};
match dir {
Direction::Incoming => Ok(Some(dev.receive_bytes)),
Direction::Outgoing => Ok(Some(dev.transmit_bytes)),
}
}
impl Component for NetworkUsage {
type Error = Error;
type Stream = Box<Stream<Item = String, Error = Error>>;
fn init(&mut self) -> Result<()> {
::procinfo::net::dev::dev()?
.into_iter()
.find(|dev| dev.interface == self.interface)
.ok_or_else(|| Error::from("No such network interface"))?;
Ok(())
}
fn stream(self, _: Handle) -> Self::Stream {
let conf = Arc::new(self);
utils::LoopFn::new(move || {
let timer = Timer::default();
let conf = conf.clone();
timer.sleep(conf.refresh_frequency)
.and_then(move |()| {
let conf = conf.clone();
let conf2 = conf.clone();
let first = get_bytes(conf.interface.as_str(), conf.direction)
.unwrap().unwrap();
timer.sleep(conf.sample_duration)
.and_then(move |()| {
let second = get_bytes(conf.interface.as_str(), conf.direction)
.unwrap().unwrap();
let per_second = (second-first)/conf.sample_duration.as_secs();
Ok(per_second)
})
.map(move |speed| {
let (num, power) = get_number_scale(speed, conf2.scale);
let x = 10f64.powi((conf2.percision-1) as i32);
let num = (num*x).round() / x;
format!("{} {}", num, get_prefix(conf2.scale, power))
})
})
}).map_err(|_| "timer error".into())
.boxed()
}
} | random_line_split |
|
network_usage.rs | use futures::{Future, Stream};
use std::sync::Arc;
use tokio_timer::Timer;
use tokio_core::reactor::Handle;
use component::Component;
use error::{Error, Result};
use std::time::Duration;
use utils;
#[derive(Clone, PartialEq, Copy)]
pub enum Scale {
Binary,
Decimal,
}
impl Scale {
fn base(&self) -> u16 {
match *self {
Scale::Decimal => 1000,
Scale::Binary => 1024,
}
}
}
#[derive(Clone, Copy)]
pub enum Direction {
Incoming,
Outgoing,
}
pub struct | {
pub interface: String,
pub direction: Direction,
pub scale: Scale,
pub percision: u8,
pub refresh_frequency: Duration,
pub sample_duration: Duration,
}
impl Default for NetworkUsage {
fn default() -> NetworkUsage {
NetworkUsage {
interface: "eth0".to_string(),
direction: Direction::Incoming,
scale: Scale::Binary,
percision: 3,
refresh_frequency: Duration::from_secs(10),
sample_duration: Duration::from_secs(1),
}
}
}
fn get_prefix(scale: Scale, power: u8) -> &'static str {
match (scale, power) {
(Scale::Decimal, 0) | (Scale::Binary, 0) => "B/s",
(Scale::Decimal, 1) => "kb/s",
(Scale::Decimal, 2) => "Mb/s",
(Scale::Decimal, 3) => "Gb/s",
(Scale::Decimal, 4) => "Tb/s",
(Scale::Binary, 1) => "KiB/s",
(Scale::Binary, 2) => "MiB/s",
(Scale::Binary, 3) => "GiB/s",
(Scale::Binary, 4) => "TiB/s",
_ => "X/s",
}
}
fn get_number_scale(number: u64, scale: Scale) -> (f64, u8) {
let log = (number as f64).log(scale.base() as f64);
let wholes = log.floor();
let over = (scale.base() as f64).powf(log - wholes);
(over, wholes as u8)
}
fn get_bytes(interface: &str, dir: Direction) -> ::std::io::Result<Option<u64>> {
let dev = ::procinfo::net::dev::dev()?
.into_iter()
.find(|dev| dev.interface == interface);
let dev = match dev {
Some(dev) => dev,
None => return Ok(None),
};
match dir {
Direction::Incoming => Ok(Some(dev.receive_bytes)),
Direction::Outgoing => Ok(Some(dev.transmit_bytes)),
}
}
impl Component for NetworkUsage {
type Error = Error;
type Stream = Box<Stream<Item = String, Error = Error>>;
fn init(&mut self) -> Result<()> {
::procinfo::net::dev::dev()?
.into_iter()
.find(|dev| dev.interface == self.interface)
.ok_or_else(|| Error::from("No such network interface"))?;
Ok(())
}
fn stream(self, _: Handle) -> Self::Stream {
let conf = Arc::new(self);
utils::LoopFn::new(move || {
let timer = Timer::default();
let conf = conf.clone();
timer.sleep(conf.refresh_frequency)
.and_then(move |()| {
let conf = conf.clone();
let conf2 = conf.clone();
let first = get_bytes(conf.interface.as_str(), conf.direction)
.unwrap().unwrap();
timer.sleep(conf.sample_duration)
.and_then(move |()| {
let second = get_bytes(conf.interface.as_str(), conf.direction)
.unwrap().unwrap();
let per_second = (second-first)/conf.sample_duration.as_secs();
Ok(per_second)
})
.map(move |speed| {
let (num, power) = get_number_scale(speed, conf2.scale);
let x = 10f64.powi((conf2.percision-1) as i32);
let num = (num*x).round() / x;
format!("{} {}", num, get_prefix(conf2.scale, power))
})
})
}).map_err(|_| "timer error".into())
.boxed()
}
}
| NetworkUsage | identifier_name |
network_usage.rs | use futures::{Future, Stream};
use std::sync::Arc;
use tokio_timer::Timer;
use tokio_core::reactor::Handle;
use component::Component;
use error::{Error, Result};
use std::time::Duration;
use utils;
#[derive(Clone, PartialEq, Copy)]
pub enum Scale {
Binary,
Decimal,
}
impl Scale {
fn base(&self) -> u16 {
match *self {
Scale::Decimal => 1000,
Scale::Binary => 1024,
}
}
}
#[derive(Clone, Copy)]
pub enum Direction {
Incoming,
Outgoing,
}
pub struct NetworkUsage {
pub interface: String,
pub direction: Direction,
pub scale: Scale,
pub percision: u8,
pub refresh_frequency: Duration,
pub sample_duration: Duration,
}
impl Default for NetworkUsage {
fn default() -> NetworkUsage |
}
fn get_prefix(scale: Scale, power: u8) -> &'static str {
match (scale, power) {
(Scale::Decimal, 0) | (Scale::Binary, 0) => "B/s",
(Scale::Decimal, 1) => "kb/s",
(Scale::Decimal, 2) => "Mb/s",
(Scale::Decimal, 3) => "Gb/s",
(Scale::Decimal, 4) => "Tb/s",
(Scale::Binary, 1) => "KiB/s",
(Scale::Binary, 2) => "MiB/s",
(Scale::Binary, 3) => "GiB/s",
(Scale::Binary, 4) => "TiB/s",
_ => "X/s",
}
}
fn get_number_scale(number: u64, scale: Scale) -> (f64, u8) {
let log = (number as f64).log(scale.base() as f64);
let wholes = log.floor();
let over = (scale.base() as f64).powf(log - wholes);
(over, wholes as u8)
}
fn get_bytes(interface: &str, dir: Direction) -> ::std::io::Result<Option<u64>> {
let dev = ::procinfo::net::dev::dev()?
.into_iter()
.find(|dev| dev.interface == interface);
let dev = match dev {
Some(dev) => dev,
None => return Ok(None),
};
match dir {
Direction::Incoming => Ok(Some(dev.receive_bytes)),
Direction::Outgoing => Ok(Some(dev.transmit_bytes)),
}
}
impl Component for NetworkUsage {
type Error = Error;
type Stream = Box<Stream<Item = String, Error = Error>>;
fn init(&mut self) -> Result<()> {
::procinfo::net::dev::dev()?
.into_iter()
.find(|dev| dev.interface == self.interface)
.ok_or_else(|| Error::from("No such network interface"))?;
Ok(())
}
fn stream(self, _: Handle) -> Self::Stream {
let conf = Arc::new(self);
utils::LoopFn::new(move || {
let timer = Timer::default();
let conf = conf.clone();
timer.sleep(conf.refresh_frequency)
.and_then(move |()| {
let conf = conf.clone();
let conf2 = conf.clone();
let first = get_bytes(conf.interface.as_str(), conf.direction)
.unwrap().unwrap();
timer.sleep(conf.sample_duration)
.and_then(move |()| {
let second = get_bytes(conf.interface.as_str(), conf.direction)
.unwrap().unwrap();
let per_second = (second-first)/conf.sample_duration.as_secs();
Ok(per_second)
})
.map(move |speed| {
let (num, power) = get_number_scale(speed, conf2.scale);
let x = 10f64.powi((conf2.percision-1) as i32);
let num = (num*x).round() / x;
format!("{} {}", num, get_prefix(conf2.scale, power))
})
})
}).map_err(|_| "timer error".into())
.boxed()
}
}
| {
NetworkUsage {
interface: "eth0".to_string(),
direction: Direction::Incoming,
scale: Scale::Binary,
percision: 3,
refresh_frequency: Duration::from_secs(10),
sample_duration: Duration::from_secs(1),
}
} | identifier_body |
float_context.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use geom::point::Point2D;
use geom::size::Size2D;
use geom::rect::Rect;
use gfx::geometry::{Au, max, min};
use std::util::replace;
use std::vec;
use std::i32::max_value;
#[deriving(Clone)]
pub enum FloatType{
FloatLeft,
FloatRight
}
pub enum ClearType {
ClearLeft,
ClearRight,
ClearBoth
}
struct FloatContextBase{
float_data: ~[Option<FloatData>],
floats_used: uint,
max_y : Au,
offset: Point2D<Au>
}
#[deriving(Clone)]
struct FloatData{
bounds: Rect<Au>,
f_type: FloatType
}
/// All information necessary to place a float
pub struct PlacementInfo{
width: Au, // The dimensions of the float
height: Au,
ceiling: Au, // The minimum top of the float, as determined by earlier elements
max_width: Au, // The maximum right of the float, generally determined by the contining block
f_type: FloatType // left or right
}
/// Wrappers around float methods. To avoid allocating data we'll never use,
/// destroy the context on modification.
pub enum FloatContext {
Invalid,
Valid(~FloatContextBase)
}
impl FloatContext {
pub fn new(num_floats: uint) -> FloatContext {
Valid(~FloatContextBase::new(num_floats))
}
#[inline(always)]
pub fn clone(&mut self) -> FloatContext {
match *self {
Invalid => fail!("Can't clone an invalid float context"),
Valid(_) => replace(self, Invalid)
}
}
#[inline(always)]
fn with_mut_base<R>(&mut self, callback: &fn(&mut FloatContextBase) -> R) -> R {
match *self {
Invalid => fail!("Float context no longer available"),
Valid(ref mut base) => callback(&mut **base)
}
}
#[inline(always)]
pub fn with_base<R>(&self, callback: &fn(&FloatContextBase) -> R) -> R {
match *self {
Invalid => fail!("Float context no longer available"),
Valid(ref base) => callback(& **base)
}
}
#[inline(always)]
pub fn translate(&mut self, trans: Point2D<Au>) -> FloatContext {
do self.with_mut_base |base| {
base.translate(trans);
}
replace(self, Invalid)
}
#[inline(always)]
pub fn available_rect(&mut self, top: Au, height: Au, max_x: Au) -> Option<Rect<Au>> {
do self.with_base |base| {
base.available_rect(top, height, max_x)
}
}
#[inline(always)]
pub fn add_float(&mut self, info: &PlacementInfo) -> FloatContext{
do self.with_mut_base |base| {
base.add_float(info);
}
replace(self, Invalid)
}
#[inline(always)]
pub fn place_between_floats(&self, info: &PlacementInfo) -> Rect<Au> |
#[inline(always)]
pub fn last_float_pos(&mut self) -> Point2D<Au> {
do self.with_base |base| {
base.last_float_pos()
}
}
#[inline(always)]
pub fn clearance(&self, clear: ClearType) -> Au {
do self.with_base |base| {
base.clearance(clear)
}
}
}
impl FloatContextBase{
fn new(num_floats: uint) -> FloatContextBase {
debug!("Creating float context of size %?", num_floats);
let new_data = vec::from_elem(num_floats, None);
FloatContextBase {
float_data: new_data,
floats_used: 0,
max_y: Au(0),
offset: Point2D(Au(0), Au(0))
}
}
fn translate(&mut self, trans: Point2D<Au>) {
self.offset = self.offset + trans;
}
fn last_float_pos(&self) -> Point2D<Au> {
assert!(self.floats_used > 0, "Error: tried to access FloatContext with no floats in it");
match self.float_data[self.floats_used - 1] {
None => fail!("FloatContext error: floats should never be None here"),
Some(float) => {
debug!("Returning float position: %?", float.bounds.origin + self.offset);
float.bounds.origin + self.offset
}
}
}
/// Returns a rectangle that encloses the region from top to top + height,
/// with width small enough that it doesn't collide with any floats. max_x
/// is the x-coordinate beyond which floats have no effect (generally
/// this is the containing block width).
fn available_rect(&self, top: Au, height: Au, max_x: Au) -> Option<Rect<Au>> {
fn range_intersect(top_1: Au, bottom_1: Au, top_2: Au, bottom_2: Au) -> (Au, Au) {
(max(top_1, top_2), min(bottom_1, bottom_2))
}
let top = top - self.offset.y;
debug!("available_rect: trying to find space at %?", top);
// Relevant dimensions for the right-most left float
let mut max_left = Au(0) - self.offset.x;
let mut l_top = None;
let mut l_bottom = None;
// Relevant dimensions for the left-most right float
let mut min_right = max_x - self.offset.x;
let mut r_top = None;
let mut r_bottom = None;
// Find the float collisions for the given vertical range.
for float in self.float_data.iter() {
debug!("available_rect: Checking for collision against float");
match *float{
None => (),
Some(data) => {
let float_pos = data.bounds.origin;
let float_size = data.bounds.size;
debug!("float_pos: %?, float_size: %?", float_pos, float_size);
match data.f_type {
FloatLeft => {
if(float_pos.x + float_size.width > max_left &&
float_pos.y + float_size.height > top && float_pos.y < top + height) {
max_left = float_pos.x + float_size.width;
l_top = Some(float_pos.y);
l_bottom = Some(float_pos.y + float_size.height);
debug!("available_rect: collision with left float: new max_left is %?",
max_left);
}
}
FloatRight => {
if(float_pos.x < min_right &&
float_pos.y + float_size.height > top && float_pos.y < top + height) {
min_right = float_pos.x;
r_top = Some(float_pos.y);
r_bottom = Some(float_pos.y + float_size.height);
debug!("available_rect: collision with right float: new min_right is %?",
min_right);
}
}
}
}
};
}
// Extend the vertical range of the rectangle to the closest floats.
// If there are floats on both sides, take the intersection of the
// two areas. Also make sure we never return a top smaller than the
// given upper bound.
let (top, bottom) = match (r_top, r_bottom, l_top, l_bottom) {
(Some(r_top), Some(r_bottom), Some(l_top), Some(l_bottom)) =>
range_intersect(max(top, r_top), r_bottom, max(top, l_top), l_bottom),
(None, None, Some(l_top), Some(l_bottom)) => (max(top, l_top), l_bottom),
(Some(r_top), Some(r_bottom), None, None) => (max(top, r_top), r_bottom),
(None, None, None, None) => return None,
_ => fail!("Reached unreachable state when computing float area")
};
// This assertion is too strong and fails in some cases. It is OK to
// return negative widths since we check against that right away, but
// we should still undersrtand why they occur and add a stronger
// assertion here.
//assert!(max_left < min_right);
assert!(top <= bottom, "Float position error");
Some(Rect{
origin: Point2D(max_left, top) + self.offset,
size: Size2D(min_right - max_left, bottom - top)
})
}
fn add_float(&mut self, info: &PlacementInfo) {
debug!("Floats_used: %?, Floats available: %?", self.floats_used, self.float_data.len());
assert!(self.floats_used < self.float_data.len() &&
self.float_data[self.floats_used].is_none());
let new_info = PlacementInfo {
width: info.width,
height: info.height,
ceiling: max(info.ceiling, self.max_y + self.offset.y),
max_width: info.max_width,
f_type: info.f_type
};
debug!("add_float: added float with info %?", new_info);
let new_float = FloatData {
bounds: Rect {
origin: self.place_between_floats(&new_info).origin - self.offset,
size: Size2D(info.width, info.height)
},
f_type: info.f_type
};
self.float_data[self.floats_used] = Some(new_float);
self.max_y = max(self.max_y, new_float.bounds.origin.y);
self.floats_used += 1;
}
/// Returns true if the given rect overlaps with any floats.
fn collides_with_float(&self, bounds: &Rect<Au>) -> bool {
for float in self.float_data.iter() {
match *float{
None => (),
Some(data) => {
if data.bounds.translate(&self.offset).intersects(bounds) {
return true;
}
}
};
}
return false;
}
/// Given the top 3 sides of the rectange, finds the largest height that
/// will result in the rectange not colliding with any floats. Returns
/// None if that height is infinite.
fn max_height_for_bounds(&self, left: Au, top: Au, width: Au) -> Option<Au> {
let top = top - self.offset.y;
let left = left - self.offset.x;
let mut max_height = None;
for float in self.float_data.iter() {
match *float {
None => (),
Some(f_data) => {
if f_data.bounds.origin.y + f_data.bounds.size.height > top &&
f_data.bounds.origin.x + f_data.bounds.size.width > left &&
f_data.bounds.origin.x < left + width {
let new_y = f_data.bounds.origin.y;
max_height = Some(min(max_height.unwrap_or_default(new_y), new_y));
}
}
}
}
max_height.map(|h| h + self.offset.y)
}
/// Given necessary info, finds the closest place a box can be positioned
/// without colliding with any floats.
fn place_between_floats(&self, info: &PlacementInfo) -> Rect<Au>{
debug!("place_float: Placing float with width %? and height %?", info.width, info.height);
// Can't go any higher than previous floats or
// previous elements in the document.
let mut float_y = info.ceiling;
loop {
let maybe_location = self.available_rect(float_y, info.height, info.max_width);
debug!("place_float: Got available rect: %? for y-pos: %?", maybe_location, float_y);
match maybe_location {
// If there are no floats blocking us, return the current location
// TODO(eatknson): integrate with overflow
None => return match info.f_type {
FloatLeft => Rect(Point2D(Au(0), float_y),
Size2D(info.max_width, Au(max_value))),
FloatRight => Rect(Point2D(info.max_width - info.width, float_y),
Size2D(info.max_width, Au(max_value)))
},
Some(rect) => {
assert!(rect.origin.y + rect.size.height!= float_y,
"Non-terminating float placement");
// Place here if there is enough room
if (rect.size.width >= info.width) {
let height = self.max_height_for_bounds(rect.origin.x,
rect.origin.y,
rect.size.width);
let height = height.unwrap_or_default(Au(max_value));
return match info.f_type {
FloatLeft => Rect(Point2D(rect.origin.x, float_y),
Size2D(rect.size.width, height)),
FloatRight => {
Rect(Point2D(rect.origin.x + rect.size.width - info.width, float_y),
Size2D(rect.size.width, height))
}
};
}
// Try to place at the next-lowest location.
// Need to be careful of fencepost errors.
float_y = rect.origin.y + rect.size.height;
}
}
}
}
fn clearance(&self, clear: ClearType) -> Au {
let mut clearance = Au(0);
for float in self.float_data.iter() {
match *float {
None => (),
Some(f_data) => {
match (clear, f_data.f_type) {
(ClearLeft, FloatLeft) |
(ClearRight, FloatRight) |
(ClearBoth, _) => {
clearance = max(
clearance,
self.offset.y + f_data.bounds.origin.y + f_data.bounds.size.height);
}
_ => ()
}
}
}
}
clearance
}
}
| {
do self.with_base |base| {
base.place_between_floats(info)
}
} | identifier_body |
float_context.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use geom::point::Point2D;
use geom::size::Size2D;
use geom::rect::Rect;
use gfx::geometry::{Au, max, min};
use std::util::replace;
use std::vec;
use std::i32::max_value;
#[deriving(Clone)]
pub enum FloatType{
FloatLeft,
FloatRight
}
pub enum ClearType {
ClearLeft,
ClearRight,
ClearBoth
}
struct FloatContextBase{
float_data: ~[Option<FloatData>],
floats_used: uint,
max_y : Au,
offset: Point2D<Au>
}
#[deriving(Clone)]
struct FloatData{
bounds: Rect<Au>,
f_type: FloatType
}
/// All information necessary to place a float
pub struct PlacementInfo{
width: Au, // The dimensions of the float
height: Au,
ceiling: Au, // The minimum top of the float, as determined by earlier elements
max_width: Au, // The maximum right of the float, generally determined by the contining block
f_type: FloatType // left or right
}
/// Wrappers around float methods. To avoid allocating data we'll never use,
/// destroy the context on modification.
pub enum FloatContext {
Invalid,
Valid(~FloatContextBase)
}
impl FloatContext {
pub fn new(num_floats: uint) -> FloatContext {
Valid(~FloatContextBase::new(num_floats))
}
#[inline(always)]
pub fn clone(&mut self) -> FloatContext {
match *self {
Invalid => fail!("Can't clone an invalid float context"),
Valid(_) => replace(self, Invalid)
}
}
#[inline(always)]
fn with_mut_base<R>(&mut self, callback: &fn(&mut FloatContextBase) -> R) -> R {
match *self {
Invalid => fail!("Float context no longer available"),
Valid(ref mut base) => callback(&mut **base)
}
}
#[inline(always)]
pub fn with_base<R>(&self, callback: &fn(&FloatContextBase) -> R) -> R {
match *self {
Invalid => fail!("Float context no longer available"),
Valid(ref base) => callback(& **base)
}
}
#[inline(always)]
pub fn translate(&mut self, trans: Point2D<Au>) -> FloatContext {
do self.with_mut_base |base| {
base.translate(trans);
}
replace(self, Invalid)
}
#[inline(always)]
pub fn available_rect(&mut self, top: Au, height: Au, max_x: Au) -> Option<Rect<Au>> {
do self.with_base |base| {
base.available_rect(top, height, max_x)
}
}
#[inline(always)]
pub fn add_float(&mut self, info: &PlacementInfo) -> FloatContext{
do self.with_mut_base |base| {
base.add_float(info);
}
replace(self, Invalid)
}
#[inline(always)]
pub fn place_between_floats(&self, info: &PlacementInfo) -> Rect<Au> {
do self.with_base |base| {
base.place_between_floats(info)
}
}
#[inline(always)]
pub fn last_float_pos(&mut self) -> Point2D<Au> {
do self.with_base |base| {
base.last_float_pos()
}
}
#[inline(always)]
pub fn clearance(&self, clear: ClearType) -> Au {
do self.with_base |base| {
base.clearance(clear)
}
}
}
impl FloatContextBase{
fn new(num_floats: uint) -> FloatContextBase {
debug!("Creating float context of size %?", num_floats);
let new_data = vec::from_elem(num_floats, None);
FloatContextBase {
float_data: new_data,
floats_used: 0,
max_y: Au(0),
offset: Point2D(Au(0), Au(0))
}
}
fn translate(&mut self, trans: Point2D<Au>) {
self.offset = self.offset + trans;
}
fn last_float_pos(&self) -> Point2D<Au> {
assert!(self.floats_used > 0, "Error: tried to access FloatContext with no floats in it");
match self.float_data[self.floats_used - 1] {
None => fail!("FloatContext error: floats should never be None here"),
Some(float) => {
debug!("Returning float position: %?", float.bounds.origin + self.offset);
float.bounds.origin + self.offset
}
}
}
/// Returns a rectangle that encloses the region from top to top + height,
/// with width small enough that it doesn't collide with any floats. max_x
/// is the x-coordinate beyond which floats have no effect (generally
/// this is the containing block width).
fn available_rect(&self, top: Au, height: Au, max_x: Au) -> Option<Rect<Au>> {
fn range_intersect(top_1: Au, bottom_1: Au, top_2: Au, bottom_2: Au) -> (Au, Au) {
(max(top_1, top_2), min(bottom_1, bottom_2))
}
let top = top - self.offset.y;
debug!("available_rect: trying to find space at %?", top);
// Relevant dimensions for the right-most left float
let mut max_left = Au(0) - self.offset.x;
let mut l_top = None;
let mut l_bottom = None;
// Relevant dimensions for the left-most right float
let mut min_right = max_x - self.offset.x;
let mut r_top = None;
let mut r_bottom = None;
// Find the float collisions for the given vertical range.
for float in self.float_data.iter() {
debug!("available_rect: Checking for collision against float");
match *float{
None => (),
Some(data) => {
let float_pos = data.bounds.origin;
let float_size = data.bounds.size;
debug!("float_pos: %?, float_size: %?", float_pos, float_size);
match data.f_type {
FloatLeft => {
if(float_pos.x + float_size.width > max_left &&
float_pos.y + float_size.height > top && float_pos.y < top + height) {
max_left = float_pos.x + float_size.width;
l_top = Some(float_pos.y);
l_bottom = Some(float_pos.y + float_size.height);
debug!("available_rect: collision with left float: new max_left is %?",
max_left);
}
}
FloatRight => {
if(float_pos.x < min_right &&
float_pos.y + float_size.height > top && float_pos.y < top + height) {
min_right = float_pos.x;
r_top = Some(float_pos.y);
r_bottom = Some(float_pos.y + float_size.height);
debug!("available_rect: collision with right float: new min_right is %?",
min_right);
}
}
}
}
};
}
// Extend the vertical range of the rectangle to the closest floats.
// If there are floats on both sides, take the intersection of the
// two areas. Also make sure we never return a top smaller than the
// given upper bound.
let (top, bottom) = match (r_top, r_bottom, l_top, l_bottom) {
(Some(r_top), Some(r_bottom), Some(l_top), Some(l_bottom)) =>
range_intersect(max(top, r_top), r_bottom, max(top, l_top), l_bottom),
(None, None, Some(l_top), Some(l_bottom)) => (max(top, l_top), l_bottom),
(Some(r_top), Some(r_bottom), None, None) => (max(top, r_top), r_bottom),
(None, None, None, None) => return None,
_ => fail!("Reached unreachable state when computing float area")
};
// This assertion is too strong and fails in some cases. It is OK to
// return negative widths since we check against that right away, but
// we should still undersrtand why they occur and add a stronger
// assertion here.
//assert!(max_left < min_right);
assert!(top <= bottom, "Float position error");
Some(Rect{
origin: Point2D(max_left, top) + self.offset,
size: Size2D(min_right - max_left, bottom - top)
})
}
fn add_float(&mut self, info: &PlacementInfo) {
debug!("Floats_used: %?, Floats available: %?", self.floats_used, self.float_data.len());
assert!(self.floats_used < self.float_data.len() &&
self.float_data[self.floats_used].is_none());
let new_info = PlacementInfo {
width: info.width,
height: info.height,
ceiling: max(info.ceiling, self.max_y + self.offset.y),
max_width: info.max_width,
f_type: info.f_type
};
debug!("add_float: added float with info %?", new_info);
let new_float = FloatData {
bounds: Rect {
origin: self.place_between_floats(&new_info).origin - self.offset,
size: Size2D(info.width, info.height)
},
f_type: info.f_type
};
self.float_data[self.floats_used] = Some(new_float);
self.max_y = max(self.max_y, new_float.bounds.origin.y);
self.floats_used += 1;
}
/// Returns true if the given rect overlaps with any floats.
fn | (&self, bounds: &Rect<Au>) -> bool {
for float in self.float_data.iter() {
match *float{
None => (),
Some(data) => {
if data.bounds.translate(&self.offset).intersects(bounds) {
return true;
}
}
};
}
return false;
}
/// Given the top 3 sides of the rectange, finds the largest height that
/// will result in the rectange not colliding with any floats. Returns
/// None if that height is infinite.
fn max_height_for_bounds(&self, left: Au, top: Au, width: Au) -> Option<Au> {
let top = top - self.offset.y;
let left = left - self.offset.x;
let mut max_height = None;
for float in self.float_data.iter() {
match *float {
None => (),
Some(f_data) => {
if f_data.bounds.origin.y + f_data.bounds.size.height > top &&
f_data.bounds.origin.x + f_data.bounds.size.width > left &&
f_data.bounds.origin.x < left + width {
let new_y = f_data.bounds.origin.y;
max_height = Some(min(max_height.unwrap_or_default(new_y), new_y));
}
}
}
}
max_height.map(|h| h + self.offset.y)
}
/// Given necessary info, finds the closest place a box can be positioned
/// without colliding with any floats.
fn place_between_floats(&self, info: &PlacementInfo) -> Rect<Au>{
debug!("place_float: Placing float with width %? and height %?", info.width, info.height);
// Can't go any higher than previous floats or
// previous elements in the document.
let mut float_y = info.ceiling;
loop {
let maybe_location = self.available_rect(float_y, info.height, info.max_width);
debug!("place_float: Got available rect: %? for y-pos: %?", maybe_location, float_y);
match maybe_location {
// If there are no floats blocking us, return the current location
// TODO(eatknson): integrate with overflow
None => return match info.f_type {
FloatLeft => Rect(Point2D(Au(0), float_y),
Size2D(info.max_width, Au(max_value))),
FloatRight => Rect(Point2D(info.max_width - info.width, float_y),
Size2D(info.max_width, Au(max_value)))
},
Some(rect) => {
assert!(rect.origin.y + rect.size.height!= float_y,
"Non-terminating float placement");
// Place here if there is enough room
if (rect.size.width >= info.width) {
let height = self.max_height_for_bounds(rect.origin.x,
rect.origin.y,
rect.size.width);
let height = height.unwrap_or_default(Au(max_value));
return match info.f_type {
FloatLeft => Rect(Point2D(rect.origin.x, float_y),
Size2D(rect.size.width, height)),
FloatRight => {
Rect(Point2D(rect.origin.x + rect.size.width - info.width, float_y),
Size2D(rect.size.width, height))
}
};
}
// Try to place at the next-lowest location.
// Need to be careful of fencepost errors.
float_y = rect.origin.y + rect.size.height;
}
}
}
}
fn clearance(&self, clear: ClearType) -> Au {
let mut clearance = Au(0);
for float in self.float_data.iter() {
match *float {
None => (),
Some(f_data) => {
match (clear, f_data.f_type) {
(ClearLeft, FloatLeft) |
(ClearRight, FloatRight) |
(ClearBoth, _) => {
clearance = max(
clearance,
self.offset.y + f_data.bounds.origin.y + f_data.bounds.size.height);
}
_ => ()
}
}
}
}
clearance
}
}
| collides_with_float | identifier_name |
float_context.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use geom::point::Point2D;
use geom::size::Size2D;
use geom::rect::Rect;
use gfx::geometry::{Au, max, min};
use std::util::replace;
use std::vec;
use std::i32::max_value;
#[deriving(Clone)]
pub enum FloatType{
FloatLeft,
FloatRight
}
pub enum ClearType {
ClearLeft,
ClearRight,
ClearBoth
}
struct FloatContextBase{
float_data: ~[Option<FloatData>],
floats_used: uint,
max_y : Au,
offset: Point2D<Au>
}
#[deriving(Clone)]
struct FloatData{
bounds: Rect<Au>,
f_type: FloatType
}
/// All information necessary to place a float
pub struct PlacementInfo{
width: Au, // The dimensions of the float
height: Au,
ceiling: Au, // The minimum top of the float, as determined by earlier elements
max_width: Au, // The maximum right of the float, generally determined by the contining block
f_type: FloatType // left or right
}
/// Wrappers around float methods. To avoid allocating data we'll never use,
/// destroy the context on modification.
pub enum FloatContext {
Invalid,
Valid(~FloatContextBase)
}
impl FloatContext {
pub fn new(num_floats: uint) -> FloatContext {
Valid(~FloatContextBase::new(num_floats))
}
#[inline(always)]
pub fn clone(&mut self) -> FloatContext {
match *self {
Invalid => fail!("Can't clone an invalid float context"),
Valid(_) => replace(self, Invalid)
}
}
#[inline(always)]
fn with_mut_base<R>(&mut self, callback: &fn(&mut FloatContextBase) -> R) -> R {
match *self {
Invalid => fail!("Float context no longer available"),
Valid(ref mut base) => callback(&mut **base)
}
}
#[inline(always)]
pub fn with_base<R>(&self, callback: &fn(&FloatContextBase) -> R) -> R {
match *self {
Invalid => fail!("Float context no longer available"),
Valid(ref base) => callback(& **base)
}
}
#[inline(always)]
pub fn translate(&mut self, trans: Point2D<Au>) -> FloatContext {
do self.with_mut_base |base| {
base.translate(trans);
}
replace(self, Invalid)
}
#[inline(always)]
pub fn available_rect(&mut self, top: Au, height: Au, max_x: Au) -> Option<Rect<Au>> {
do self.with_base |base| {
base.available_rect(top, height, max_x)
}
}
#[inline(always)]
pub fn add_float(&mut self, info: &PlacementInfo) -> FloatContext{
do self.with_mut_base |base| {
base.add_float(info);
}
replace(self, Invalid)
}
#[inline(always)]
pub fn place_between_floats(&self, info: &PlacementInfo) -> Rect<Au> {
do self.with_base |base| {
base.place_between_floats(info)
}
}
#[inline(always)]
pub fn last_float_pos(&mut self) -> Point2D<Au> {
do self.with_base |base| {
base.last_float_pos()
}
}
#[inline(always)]
pub fn clearance(&self, clear: ClearType) -> Au {
do self.with_base |base| {
base.clearance(clear)
}
}
}
impl FloatContextBase{
fn new(num_floats: uint) -> FloatContextBase {
debug!("Creating float context of size %?", num_floats);
let new_data = vec::from_elem(num_floats, None);
FloatContextBase {
float_data: new_data,
floats_used: 0,
max_y: Au(0),
offset: Point2D(Au(0), Au(0))
}
}
fn translate(&mut self, trans: Point2D<Au>) {
self.offset = self.offset + trans;
}
fn last_float_pos(&self) -> Point2D<Au> {
assert!(self.floats_used > 0, "Error: tried to access FloatContext with no floats in it");
match self.float_data[self.floats_used - 1] {
None => fail!("FloatContext error: floats should never be None here"),
Some(float) => {
debug!("Returning float position: %?", float.bounds.origin + self.offset);
float.bounds.origin + self.offset
}
}
}
/// Returns a rectangle that encloses the region from top to top + height,
/// with width small enough that it doesn't collide with any floats. max_x
/// is the x-coordinate beyond which floats have no effect (generally
/// this is the containing block width).
fn available_rect(&self, top: Au, height: Au, max_x: Au) -> Option<Rect<Au>> {
fn range_intersect(top_1: Au, bottom_1: Au, top_2: Au, bottom_2: Au) -> (Au, Au) {
(max(top_1, top_2), min(bottom_1, bottom_2))
}
let top = top - self.offset.y;
debug!("available_rect: trying to find space at %?", top);
// Relevant dimensions for the right-most left float
let mut max_left = Au(0) - self.offset.x;
let mut l_top = None;
let mut l_bottom = None;
// Relevant dimensions for the left-most right float
let mut min_right = max_x - self.offset.x;
let mut r_top = None;
let mut r_bottom = None;
// Find the float collisions for the given vertical range.
for float in self.float_data.iter() {
debug!("available_rect: Checking for collision against float");
match *float{
None => (),
Some(data) => {
let float_pos = data.bounds.origin;
let float_size = data.bounds.size;
debug!("float_pos: %?, float_size: %?", float_pos, float_size);
match data.f_type {
FloatLeft => {
if(float_pos.x + float_size.width > max_left &&
float_pos.y + float_size.height > top && float_pos.y < top + height) {
max_left = float_pos.x + float_size.width;
l_top = Some(float_pos.y);
l_bottom = Some(float_pos.y + float_size.height);
debug!("available_rect: collision with left float: new max_left is %?",
max_left);
}
}
FloatRight => {
if(float_pos.x < min_right &&
float_pos.y + float_size.height > top && float_pos.y < top + height) {
min_right = float_pos.x;
r_top = Some(float_pos.y);
r_bottom = Some(float_pos.y + float_size.height);
debug!("available_rect: collision with right float: new min_right is %?",
min_right);
}
}
}
}
};
}
// Extend the vertical range of the rectangle to the closest floats.
// If there are floats on both sides, take the intersection of the
// two areas. Also make sure we never return a top smaller than the
// given upper bound.
let (top, bottom) = match (r_top, r_bottom, l_top, l_bottom) {
(Some(r_top), Some(r_bottom), Some(l_top), Some(l_bottom)) =>
range_intersect(max(top, r_top), r_bottom, max(top, l_top), l_bottom),
(None, None, Some(l_top), Some(l_bottom)) => (max(top, l_top), l_bottom),
(Some(r_top), Some(r_bottom), None, None) => (max(top, r_top), r_bottom),
(None, None, None, None) => return None,
_ => fail!("Reached unreachable state when computing float area")
};
// This assertion is too strong and fails in some cases. It is OK to
// return negative widths since we check against that right away, but
// we should still undersrtand why they occur and add a stronger
// assertion here.
//assert!(max_left < min_right);
assert!(top <= bottom, "Float position error");
Some(Rect{
origin: Point2D(max_left, top) + self.offset,
size: Size2D(min_right - max_left, bottom - top)
})
}
fn add_float(&mut self, info: &PlacementInfo) {
debug!("Floats_used: %?, Floats available: %?", self.floats_used, self.float_data.len());
assert!(self.floats_used < self.float_data.len() &&
self.float_data[self.floats_used].is_none());
let new_info = PlacementInfo {
width: info.width,
height: info.height,
ceiling: max(info.ceiling, self.max_y + self.offset.y),
max_width: info.max_width,
f_type: info.f_type
};
debug!("add_float: added float with info %?", new_info);
let new_float = FloatData {
bounds: Rect {
origin: self.place_between_floats(&new_info).origin - self.offset,
size: Size2D(info.width, info.height)
},
f_type: info.f_type
};
self.float_data[self.floats_used] = Some(new_float);
self.max_y = max(self.max_y, new_float.bounds.origin.y);
self.floats_used += 1;
}
/// Returns true if the given rect overlaps with any floats.
fn collides_with_float(&self, bounds: &Rect<Au>) -> bool {
for float in self.float_data.iter() {
match *float{
None => (),
Some(data) => {
if data.bounds.translate(&self.offset).intersects(bounds) {
return true;
}
}
};
}
return false;
}
/// Given the top 3 sides of the rectange, finds the largest height that
/// will result in the rectange not colliding with any floats. Returns
/// None if that height is infinite.
fn max_height_for_bounds(&self, left: Au, top: Au, width: Au) -> Option<Au> {
let top = top - self.offset.y;
let left = left - self.offset.x;
let mut max_height = None;
for float in self.float_data.iter() {
match *float {
None => (),
Some(f_data) => {
if f_data.bounds.origin.y + f_data.bounds.size.height > top &&
f_data.bounds.origin.x + f_data.bounds.size.width > left &&
f_data.bounds.origin.x < left + width {
let new_y = f_data.bounds.origin.y;
max_height = Some(min(max_height.unwrap_or_default(new_y), new_y));
}
}
}
}
max_height.map(|h| h + self.offset.y)
}
/// Given necessary info, finds the closest place a box can be positioned
/// without colliding with any floats.
fn place_between_floats(&self, info: &PlacementInfo) -> Rect<Au>{
debug!("place_float: Placing float with width %? and height %?", info.width, info.height);
// Can't go any higher than previous floats or
// previous elements in the document.
let mut float_y = info.ceiling;
loop {
let maybe_location = self.available_rect(float_y, info.height, info.max_width);
debug!("place_float: Got available rect: %? for y-pos: %?", maybe_location, float_y);
match maybe_location {
// If there are no floats blocking us, return the current location
// TODO(eatknson): integrate with overflow
None => return match info.f_type {
FloatLeft => Rect(Point2D(Au(0), float_y),
Size2D(info.max_width, Au(max_value))),
FloatRight => Rect(Point2D(info.max_width - info.width, float_y), | "Non-terminating float placement");
// Place here if there is enough room
if (rect.size.width >= info.width) {
let height = self.max_height_for_bounds(rect.origin.x,
rect.origin.y,
rect.size.width);
let height = height.unwrap_or_default(Au(max_value));
return match info.f_type {
FloatLeft => Rect(Point2D(rect.origin.x, float_y),
Size2D(rect.size.width, height)),
FloatRight => {
Rect(Point2D(rect.origin.x + rect.size.width - info.width, float_y),
Size2D(rect.size.width, height))
}
};
}
// Try to place at the next-lowest location.
// Need to be careful of fencepost errors.
float_y = rect.origin.y + rect.size.height;
}
}
}
}
fn clearance(&self, clear: ClearType) -> Au {
let mut clearance = Au(0);
for float in self.float_data.iter() {
match *float {
None => (),
Some(f_data) => {
match (clear, f_data.f_type) {
(ClearLeft, FloatLeft) |
(ClearRight, FloatRight) |
(ClearBoth, _) => {
clearance = max(
clearance,
self.offset.y + f_data.bounds.origin.y + f_data.bounds.size.height);
}
_ => ()
}
}
}
}
clearance
}
} | Size2D(info.max_width, Au(max_value)))
},
Some(rect) => {
assert!(rect.origin.y + rect.size.height != float_y, | random_line_split |
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc; | use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message {
self.counts.borrow_mut().update(time, data.len() as i64);
}
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn new(pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
}
} | random_line_split |
|
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc;
use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message |
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn new(pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
}
}
| {
self.counts.borrow_mut().update(time, data.len() as i64);
} | conditional_block |
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc;
use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message {
self.counts.borrow_mut().update(time, data.len() as i64);
}
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn | (pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
}
}
| new | identifier_name |
counter.rs | //! A wrapper which counts the number of records pushed past and updates a shared count map.
use std::rc::Rc;
use std::cell::RefCell;
use progress::CountMap;
use dataflow::channels::Content;
use Push;
/// A wrapper which updates shared `counts` based on the number of records pushed.
pub struct Counter<T, D, P: Push<(T, Content<D>)>> {
pushee: P,
counts: Rc<RefCell<CountMap<T>>>,
phantom: ::std::marker::PhantomData<D>,
}
impl<T, D, P: Push<(T, Content<D>)>> Push<(T, Content<D>)> for Counter<T, D, P> where T : Eq+Clone+'static {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
if let Some((ref time, ref data)) = *message {
self.counts.borrow_mut().update(time, data.len() as i64);
}
// only propagate `None` if dirty (indicates flush)
if message.is_some() || self.counts.borrow().len() > 0 {
self.pushee.push(message);
}
}
}
impl<T, D, P: Push<(T, Content<D>)>> Counter<T, D, P> where T : Eq+Clone+'static {
/// Allocates a new `Counter` from a pushee and shared counts.
pub fn new(pushee: P, counts: Rc<RefCell<CountMap<T>>>) -> Counter<T, D, P> {
Counter {
pushee: pushee,
counts: counts,
phantom: ::std::marker::PhantomData,
}
}
/// Extracts shared counts into `updates`.
///
/// It is unclear why this method exists at the same time the counts are shared.
/// Perhaps this should be investigated, and only one pattern used. Seriously.
#[inline] pub fn pull_progress(&mut self, updates: &mut CountMap<T>) |
}
| {
while let Some((ref time, delta)) = self.counts.borrow_mut().pop() {
updates.update(time, delta);
}
} | identifier_body |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed {
return MuxCmd::Close;
}
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLOUT | EPOLLET
}
fn with_epfd(&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
| EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
//.io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
} | ::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE, | random_line_split |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed {
return MuxCmd::Close;
}
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLOUT | EPOLLET
}
fn | (&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE,
EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
//.io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
}
| with_epfd | identifier_name |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed |
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLOUT | EPOLLET
}
fn with_epfd(&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE,
EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
//.io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
}
| {
return MuxCmd::Close;
} | conditional_block |
echo.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;
use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::daemon::*;
const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;
/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler {
closed: bool
}
impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
fn next(&mut self) -> MuxCmd {
if self.closed {
return MuxCmd::Close;
}
MuxCmd::Keep
}
fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) {
let fd = event.fd;
let events = event.events;
let buffer = event.resource;
if events.contains(EPOLLHUP) {
trace!("socket's fd {}: EPOLLHUP", fd);
self.closed = true;
return;
}
if events.contains(EPOLLERR) {
error!("socket's fd {}: EPOLERR", fd);
self.closed = true;
return;
}
if events.contains(EPOLLIN) {
if let Some(n) = syscall!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
buffer.extend(n);
}
}
if events.contains(EPOLLOUT) {
if buffer.is_readable() {
if let Some(cnt) = syscall!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
buffer.consume(cnt);
}
}
}
}
}
impl EpollHandler for EchoHandler {
fn interests() -> EpollEventKind |
fn with_epfd(&mut self, _: EpollFd) {
}
}
impl Reset for EchoHandler {
fn reset(&mut self) {}
}
#[derive(Clone, Debug)]
struct EchoFactory;
impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
fn new_resource(&self) -> ByteBuffer {
ByteBuffer::with_capacity(BUF_SIZE)
}
fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
EchoHandler {
closed: false
}
}
}
fn main() {
::env_logger::init().unwrap();
info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
BUF_SIZE,
EPOLL_BUF_CAP,
EPOLL_LOOP_MS,
MAX_CONN);
let config = ServerConfig::tcp(("127.0.0.1", 9999))
.unwrap()
.max_conn(MAX_CONN)
.io_threads(1)
//.io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
.epoll_config(EpollConfig {
loop_ms: EPOLL_LOOP_MS,
buffer_capacity: EPOLL_BUF_CAP,
});
let server = Server::new(config, EchoFactory).unwrap();
Daemon::build(server)
.with_sched(SCHED_FIFO, None)
.run().unwrap();
}
| {
EPOLLIN | EPOLLOUT | EPOLLET
} | identifier_body |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn chunk_file<P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?;
let path = entry.path();
let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else {
dir_tree.insert(entry_path, self.load_file(&path)?);
}
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> |
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
}
| {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
} | identifier_body |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn chunk_file<P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?;
let path = entry.path();
let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else |
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
}
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
}
| {
dir_tree.insert(entry_path, self.load_file(&path)?);
} | conditional_block |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn chunk_file<P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?; | let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else {
dir_tree.insert(entry_path, self.load_file(&path)?);
}
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
}
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
} | let path = entry.path(); | random_line_split |
batch.rs | //! # `batch` - batches of files to marshal.
//!
//! A `Batch` represents a collection of files which are being marshalled and a stream of hashed
//! output objects. For a single `Batch`, there should correspond a set of valid marshalled files
//! and a stream of valid hashed objects produced from marshalling/hashing the batched files.
//!
//! Marshalling files in a `Batch` will spawn files to a specified threadpool provided by the
//! `Context`.
use std::fs::File;
use std::path::Path;
use futures::prelude::*;
use futures::sync::mpsc::{self, Sender, Receiver};
use futures_cpupool::CpuPool;
use memmap::{Mmap, Protection};
use BATCH_FUTURE_BUFFER_SIZE;
use arc_slice::{self, ArcSlice};
use errors::*;
use marshal::{Hashed, Hasher, ObjectHash, SmallRecord, DataTree, DirTree};
use split::{self, Chunked};
use trace::BatchTrace;
/// A batch of files being marshalled.
pub struct Batch<T: BatchTrace = ()> {
trace: T,
marshal_tx: Sender<Hashed>,
marshal_rx: Receiver<Hashed>,
marshal_pool: CpuPool,
len: usize,
}
impl<T: BatchTrace> Batch<T> {
pub fn new(marshal_pool: &CpuPool, trace: T) -> Self {
let (marshal_tx, marshal_rx) = mpsc::channel(BATCH_FUTURE_BUFFER_SIZE);
Batch {
trace,
marshal_tx,
marshal_rx,
marshal_pool,
len: 0,
}
}
/// Read a file as a byte slice. This will memory-map the underlying file.
///
/// * `file` - the file to read. *Must be opened with read permissions!*
fn read(&mut self, file: &File) -> Result<ArcSlice> {
Ok(arc_slice::mapped(Mmap::open(file, Protection::Read)?))
}
/// Read a file as a byte slice. This will memory-map the underlying file.
fn read_path<P: AsRef<Path>>(&mut self, path: P) -> Result<ArcSlice> {
self.read(&File::open(path)?)
}
/// Chunk the file at the given path.
pub fn | <P: AsRef<Path>>(&mut self, path: P) -> Result<Chunked> {
let slice = self.read_path(path)?;
let mut trace = self.trace.on_split(slice.len() as u64);
let chunked = split::chunk_with_trace(slice, &mut trace);
Ok(chunked)
}
pub fn load_file<P: AsRef<Path>>(&mut self, path: P) -> Result<DataTree> {
let chunked = self.chunk_file(path)?;
let data_tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
Ok(data_tree)
}
pub fn load_subtree<P: AsRef<Path>>(&mut self, path: P) -> Result<DirTree> {
let path_ref = path.as_ref();
let mut dir_tree = DirTree::new();
for entry_res in path_ref.read_dir()? {
let entry = entry_res?;
let path = entry.path();
let entry_path = path.strip_prefix(path_ref).unwrap();
if path.is_dir() {
dir_tree.insert(entry_path, self.load_subtree(&path)?);
} else {
dir_tree.insert(entry_path, self.load_file(&path)?);
}
}
Ok(dir_tree)
}
/// Marshal a chunked file into a tree of objects, returning the marshalled objects along with
/// the hash of the root object.
pub fn marshal_file(
&mut self,
chunked: Chunked,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let tree = DataTree::load(chunked.to_vec().into_iter().map(SmallRecord::from));
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
Box::new(self.marshal_pool.spawn(marshal))
}
pub fn marshal_subtree<P: AsRef<Path>>(
&mut self,
path: P,
) -> Box<Future<Item = ObjectHash, Error = Error> + Send> {
let result = self.load_subtree(path)
.map(|tree| {
let tree_total = tree.total();
self.len += tree_total;
let marshal = tree.marshal(Hasher::with_trace(
self.marshal_tx.clone(),
self.trace.on_marshal(tree_total),
));
self.marshal_pool.spawn(marshal)
})
.into_future()
.flatten();
Box::new(result)
}
/// The total size of the batch, in chunks.
pub fn len(&self) -> usize {
self.len
}
/// Convert this `Batch` into a stream of `Hashed` objects.
pub fn into_stream(self) -> Box<Stream<Item = Hashed, Error = Error> + Send> {
Box::new(self.marshal_rx.map_err(|()| "Upstream error!".into()))
}
}
| chunk_file | identifier_name |
htmldivelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDivElementBinding::{self, HTMLDivElementMethods};
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLDivElement {
htmlelement: HTMLElement
}
impl HTMLDivElement {
fn new_inherited(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> HTMLDivElement {
HTMLDivElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDivElement> {
Node::reflect_node(box HTMLDivElement::new_inherited(local_name, prefix, document),
document,
HTMLDivElementBinding::Wrap)
}
}
impl HTMLDivElementMethods for HTMLDivElement {
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_getter!(Align, "align");
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_setter!(SetAlign, "align"); | } | random_line_split |
|
htmldivelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDivElementBinding::{self, HTMLDivElementMethods};
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLDivElement {
htmlelement: HTMLElement
}
impl HTMLDivElement {
fn new_inherited(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> HTMLDivElement {
HTMLDivElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn | (local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDivElement> {
Node::reflect_node(box HTMLDivElement::new_inherited(local_name, prefix, document),
document,
HTMLDivElementBinding::Wrap)
}
}
impl HTMLDivElementMethods for HTMLDivElement {
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_getter!(Align, "align");
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_setter!(SetAlign, "align");
}
| new | identifier_name |
config.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage configuration.
use crate::server::ttl::TTLCheckerTask;
use crate::server::CONFIG_ROCKSDB_GAUGE;
use configuration::{ConfigChange, ConfigManager, ConfigValue, Configuration, Result as CfgResult};
use engine_rocks::raw::{Cache, LRUCacheOptions, MemoryAllocator};
use engine_rocks::RocksEngine;
use engine_traits::{CFOptionsExt, ColumnFamilyOptions, CF_DEFAULT};
use libc::c_int;
use std::error::Error;
use tikv_util::config::{self, OptionReadableSize, ReadableDuration, ReadableSize};
use tikv_util::sys::sys_quota::SysQuota;
use tikv_util::worker::Scheduler;
pub const DEFAULT_DATA_DIR: &str = "./";
const DEFAULT_GC_RATIO_THRESHOLD: f64 = 1.1;
const DEFAULT_MAX_KEY_SIZE: usize = 4 * 1024;
const DEFAULT_SCHED_CONCURRENCY: usize = 1024 * 512;
const MAX_SCHED_CONCURRENCY: usize = 2 * 1024 * 1024;
// According to "Little's law", assuming you can write 100MB per
// second, and it takes about 100ms to process the write requests
// on average, in that situation the writing bytes estimated 10MB,
// here we use 100MB as default value for tolerate 1s latency.
const DEFAULT_SCHED_PENDING_WRITE_MB: u64 = 100;
const DEFAULT_RESERVED_SPACE_GB: u64 = 5;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
#[config(skip)]
pub data_dir: String,
// Replaced by `GcConfig.ratio_threshold`. Keep it for backward compatibility.
#[config(skip)]
pub gc_ratio_threshold: f64,
#[config(skip)]
pub max_key_size: usize,
#[config(skip)]
pub scheduler_concurrency: usize,
#[config(skip)]
pub scheduler_worker_pool_size: usize,
#[config(skip)]
pub scheduler_pending_write_threshold: ReadableSize,
#[config(skip)]
// Reserve disk space to make tikv would have enough space to compact when disk is full.
pub reserve_space: ReadableSize,
#[config(skip)]
pub enable_async_apply_prewrite: bool,
#[config(skip)]
pub enable_ttl: bool,
/// Interval to check TTL for all SSTs,
pub ttl_check_poll_interval: ReadableDuration,
#[config(submodule)]
pub block_cache: BlockCacheConfig,
}
impl Default for Config {
fn default() -> Config {
let cpu_num = SysQuota::new().cpu_cores_quota();
Config {
data_dir: DEFAULT_DATA_DIR.to_owned(),
gc_ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
max_key_size: DEFAULT_MAX_KEY_SIZE,
scheduler_concurrency: DEFAULT_SCHED_CONCURRENCY,
scheduler_worker_pool_size: if cpu_num >= 16.0 { 8 } else { 4 },
scheduler_pending_write_threshold: ReadableSize::mb(DEFAULT_SCHED_PENDING_WRITE_MB),
reserve_space: ReadableSize::gb(DEFAULT_RESERVED_SPACE_GB),
enable_async_apply_prewrite: false,
enable_ttl: false,
ttl_check_poll_interval: ReadableDuration::hours(12),
block_cache: BlockCacheConfig::default(),
}
}
}
impl Config {
pub fn validate(&mut self) -> Result<(), Box<dyn Error>> {
if self.data_dir!= DEFAULT_DATA_DIR {
self.data_dir = config::canonicalize_path(&self.data_dir)?
}
if self.scheduler_concurrency > MAX_SCHED_CONCURRENCY {
warn!(
"TiKV has optimized latch since v4.0, so it is not necessary to set large schedule \
concurrency. To save memory, change it from {:?} to {:?}",
self.scheduler_concurrency, MAX_SCHED_CONCURRENCY
);
self.scheduler_concurrency = MAX_SCHED_CONCURRENCY;
}
Ok(())
}
}
pub struct StorageConfigManger {
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
}
impl StorageConfigManger {
pub fn new(
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
) -> StorageConfigManger {
StorageConfigManger {
kvdb,
shared_block_cache,
ttl_checker_scheduler,
}
}
}
impl ConfigManager for StorageConfigManger {
fn dispatch(&mut self, mut change: ConfigChange) -> CfgResult<()> {
if let Some(ConfigValue::Module(mut block_cache)) = change.remove("block_cache") {
if!self.shared_block_cache {
return Err("shared block cache is disabled".into());
}
if let Some(size) = block_cache.remove("capacity") {
let s: OptionReadableSize = size.into();
if let Some(size) = s.0 {
// Hack: since all CFs in both kvdb and raftdb share a block cache, we can change
// the size through any of them. Here we change it through default CF in kvdb.
// A better way to do it is to hold the cache reference somewhere, and use it to
// change cache size.
let opt = self.kvdb.get_options_cf(CF_DEFAULT).unwrap(); // FIXME unwrap
opt.set_block_cache_capacity(size.0)?;
// Write config to metric
CONFIG_ROCKSDB_GAUGE
.with_label_values(&[CF_DEFAULT, "block_cache_size"])
.set(size.0 as f64);
}
}
} else if let Some(v) = change.remove("ttl_check_poll_interval") {
let interval: ReadableDuration = v.into();
self.ttl_checker_scheduler
.schedule(TTLCheckerTask::UpdatePollInterval(interval.into()))
.unwrap();
}
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct BlockCacheConfig {
#[config(skip)]
pub shared: bool,
pub capacity: OptionReadableSize,
#[config(skip)]
pub num_shard_bits: i32,
#[config(skip)]
pub strict_capacity_limit: bool,
#[config(skip)]
pub high_pri_pool_ratio: f64,
#[config(skip)]
pub memory_allocator: Option<String>,
}
impl Default for BlockCacheConfig {
fn default() -> BlockCacheConfig {
BlockCacheConfig {
shared: true,
capacity: OptionReadableSize(None),
num_shard_bits: 6,
strict_capacity_limit: false,
high_pri_pool_ratio: 0.8,
memory_allocator: Some(String::from("nodump")),
}
}
}
impl BlockCacheConfig {
pub fn build_shared_cache(&self) -> Option<Cache> {
if!self.shared {
return None;
}
let capacity = match self.capacity.0 {
None => {
let total_mem = SysQuota::new().memory_limit_in_bytes();
((total_mem as f64) * 0.45) as usize
}
Some(c) => c.0 as usize,
};
let mut cache_opts = LRUCacheOptions::new();
cache_opts.set_capacity(capacity);
cache_opts.set_num_shard_bits(self.num_shard_bits as c_int);
cache_opts.set_strict_capacity_limit(self.strict_capacity_limit);
cache_opts.set_high_pri_pool_ratio(self.high_pri_pool_ratio);
if let Some(allocator) = self.new_memory_allocator() {
cache_opts.set_memory_allocator(allocator);
}
Some(Cache::new_lru_cache(cache_opts))
}
fn new_memory_allocator(&self) -> Option<MemoryAllocator> {
if let Some(ref alloc) = self.memory_allocator {
match alloc.as_str() {
#[cfg(feature = "jemalloc")]
"nodump" => match MemoryAllocator::new_jemalloc_memory_allocator() {
Ok(allocator) => {
return Some(allocator);
}
Err(e) => |
},
"" => {}
other => {
warn!(
"Memory allocator {} is not supported, continue with default allocator",
other
);
}
}
};
None
}
}
| {
warn!(
"Create jemalloc nodump allocator for block cache failed: {}, continue with default allocator",
e
);
} | conditional_block |
config.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage configuration.
use crate::server::ttl::TTLCheckerTask;
use crate::server::CONFIG_ROCKSDB_GAUGE;
use configuration::{ConfigChange, ConfigManager, ConfigValue, Configuration, Result as CfgResult};
use engine_rocks::raw::{Cache, LRUCacheOptions, MemoryAllocator};
use engine_rocks::RocksEngine;
use engine_traits::{CFOptionsExt, ColumnFamilyOptions, CF_DEFAULT};
use libc::c_int;
use std::error::Error;
use tikv_util::config::{self, OptionReadableSize, ReadableDuration, ReadableSize};
use tikv_util::sys::sys_quota::SysQuota;
use tikv_util::worker::Scheduler;
pub const DEFAULT_DATA_DIR: &str = "./";
const DEFAULT_GC_RATIO_THRESHOLD: f64 = 1.1;
const DEFAULT_MAX_KEY_SIZE: usize = 4 * 1024;
const DEFAULT_SCHED_CONCURRENCY: usize = 1024 * 512;
const MAX_SCHED_CONCURRENCY: usize = 2 * 1024 * 1024;
// According to "Little's law", assuming you can write 100MB per
// second, and it takes about 100ms to process the write requests
// on average, in that situation the writing bytes estimated 10MB,
// here we use 100MB as default value for tolerate 1s latency.
const DEFAULT_SCHED_PENDING_WRITE_MB: u64 = 100;
const DEFAULT_RESERVED_SPACE_GB: u64 = 5;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
#[config(skip)]
pub data_dir: String,
// Replaced by `GcConfig.ratio_threshold`. Keep it for backward compatibility.
#[config(skip)]
pub gc_ratio_threshold: f64,
#[config(skip)]
pub max_key_size: usize,
#[config(skip)]
pub scheduler_concurrency: usize,
#[config(skip)]
pub scheduler_worker_pool_size: usize,
#[config(skip)]
pub scheduler_pending_write_threshold: ReadableSize,
#[config(skip)]
// Reserve disk space to make tikv would have enough space to compact when disk is full.
pub reserve_space: ReadableSize,
#[config(skip)]
pub enable_async_apply_prewrite: bool,
#[config(skip)]
pub enable_ttl: bool,
/// Interval to check TTL for all SSTs,
pub ttl_check_poll_interval: ReadableDuration,
#[config(submodule)]
pub block_cache: BlockCacheConfig,
}
impl Default for Config {
fn default() -> Config {
let cpu_num = SysQuota::new().cpu_cores_quota();
Config {
data_dir: DEFAULT_DATA_DIR.to_owned(),
gc_ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
max_key_size: DEFAULT_MAX_KEY_SIZE,
scheduler_concurrency: DEFAULT_SCHED_CONCURRENCY,
scheduler_worker_pool_size: if cpu_num >= 16.0 { 8 } else { 4 },
scheduler_pending_write_threshold: ReadableSize::mb(DEFAULT_SCHED_PENDING_WRITE_MB),
reserve_space: ReadableSize::gb(DEFAULT_RESERVED_SPACE_GB),
enable_async_apply_prewrite: false,
enable_ttl: false,
ttl_check_poll_interval: ReadableDuration::hours(12),
block_cache: BlockCacheConfig::default(),
}
}
}
impl Config {
pub fn validate(&mut self) -> Result<(), Box<dyn Error>> { | "TiKV has optimized latch since v4.0, so it is not necessary to set large schedule \
concurrency. To save memory, change it from {:?} to {:?}",
self.scheduler_concurrency, MAX_SCHED_CONCURRENCY
);
self.scheduler_concurrency = MAX_SCHED_CONCURRENCY;
}
Ok(())
}
}
pub struct StorageConfigManger {
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
}
impl StorageConfigManger {
pub fn new(
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
) -> StorageConfigManger {
StorageConfigManger {
kvdb,
shared_block_cache,
ttl_checker_scheduler,
}
}
}
impl ConfigManager for StorageConfigManger {
fn dispatch(&mut self, mut change: ConfigChange) -> CfgResult<()> {
if let Some(ConfigValue::Module(mut block_cache)) = change.remove("block_cache") {
if!self.shared_block_cache {
return Err("shared block cache is disabled".into());
}
if let Some(size) = block_cache.remove("capacity") {
let s: OptionReadableSize = size.into();
if let Some(size) = s.0 {
// Hack: since all CFs in both kvdb and raftdb share a block cache, we can change
// the size through any of them. Here we change it through default CF in kvdb.
// A better way to do it is to hold the cache reference somewhere, and use it to
// change cache size.
let opt = self.kvdb.get_options_cf(CF_DEFAULT).unwrap(); // FIXME unwrap
opt.set_block_cache_capacity(size.0)?;
// Write config to metric
CONFIG_ROCKSDB_GAUGE
.with_label_values(&[CF_DEFAULT, "block_cache_size"])
.set(size.0 as f64);
}
}
} else if let Some(v) = change.remove("ttl_check_poll_interval") {
let interval: ReadableDuration = v.into();
self.ttl_checker_scheduler
.schedule(TTLCheckerTask::UpdatePollInterval(interval.into()))
.unwrap();
}
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct BlockCacheConfig {
#[config(skip)]
pub shared: bool,
pub capacity: OptionReadableSize,
#[config(skip)]
pub num_shard_bits: i32,
#[config(skip)]
pub strict_capacity_limit: bool,
#[config(skip)]
pub high_pri_pool_ratio: f64,
#[config(skip)]
pub memory_allocator: Option<String>,
}
impl Default for BlockCacheConfig {
fn default() -> BlockCacheConfig {
BlockCacheConfig {
shared: true,
capacity: OptionReadableSize(None),
num_shard_bits: 6,
strict_capacity_limit: false,
high_pri_pool_ratio: 0.8,
memory_allocator: Some(String::from("nodump")),
}
}
}
impl BlockCacheConfig {
pub fn build_shared_cache(&self) -> Option<Cache> {
if!self.shared {
return None;
}
let capacity = match self.capacity.0 {
None => {
let total_mem = SysQuota::new().memory_limit_in_bytes();
((total_mem as f64) * 0.45) as usize
}
Some(c) => c.0 as usize,
};
let mut cache_opts = LRUCacheOptions::new();
cache_opts.set_capacity(capacity);
cache_opts.set_num_shard_bits(self.num_shard_bits as c_int);
cache_opts.set_strict_capacity_limit(self.strict_capacity_limit);
cache_opts.set_high_pri_pool_ratio(self.high_pri_pool_ratio);
if let Some(allocator) = self.new_memory_allocator() {
cache_opts.set_memory_allocator(allocator);
}
Some(Cache::new_lru_cache(cache_opts))
}
fn new_memory_allocator(&self) -> Option<MemoryAllocator> {
if let Some(ref alloc) = self.memory_allocator {
match alloc.as_str() {
#[cfg(feature = "jemalloc")]
"nodump" => match MemoryAllocator::new_jemalloc_memory_allocator() {
Ok(allocator) => {
return Some(allocator);
}
Err(e) => {
warn!(
"Create jemalloc nodump allocator for block cache failed: {}, continue with default allocator",
e
);
}
},
"" => {}
other => {
warn!(
"Memory allocator {} is not supported, continue with default allocator",
other
);
}
}
};
None
}
} | if self.data_dir != DEFAULT_DATA_DIR {
self.data_dir = config::canonicalize_path(&self.data_dir)?
}
if self.scheduler_concurrency > MAX_SCHED_CONCURRENCY {
warn!( | random_line_split |
config.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage configuration.
use crate::server::ttl::TTLCheckerTask;
use crate::server::CONFIG_ROCKSDB_GAUGE;
use configuration::{ConfigChange, ConfigManager, ConfigValue, Configuration, Result as CfgResult};
use engine_rocks::raw::{Cache, LRUCacheOptions, MemoryAllocator};
use engine_rocks::RocksEngine;
use engine_traits::{CFOptionsExt, ColumnFamilyOptions, CF_DEFAULT};
use libc::c_int;
use std::error::Error;
use tikv_util::config::{self, OptionReadableSize, ReadableDuration, ReadableSize};
use tikv_util::sys::sys_quota::SysQuota;
use tikv_util::worker::Scheduler;
pub const DEFAULT_DATA_DIR: &str = "./";
const DEFAULT_GC_RATIO_THRESHOLD: f64 = 1.1;
const DEFAULT_MAX_KEY_SIZE: usize = 4 * 1024;
const DEFAULT_SCHED_CONCURRENCY: usize = 1024 * 512;
const MAX_SCHED_CONCURRENCY: usize = 2 * 1024 * 1024;
// According to "Little's law", assuming you can write 100MB per
// second, and it takes about 100ms to process the write requests
// on average, in that situation the writing bytes estimated 10MB,
// here we use 100MB as default value for tolerate 1s latency.
const DEFAULT_SCHED_PENDING_WRITE_MB: u64 = 100;
const DEFAULT_RESERVED_SPACE_GB: u64 = 5;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
#[config(skip)]
pub data_dir: String,
// Replaced by `GcConfig.ratio_threshold`. Keep it for backward compatibility.
#[config(skip)]
pub gc_ratio_threshold: f64,
#[config(skip)]
pub max_key_size: usize,
#[config(skip)]
pub scheduler_concurrency: usize,
#[config(skip)]
pub scheduler_worker_pool_size: usize,
#[config(skip)]
pub scheduler_pending_write_threshold: ReadableSize,
#[config(skip)]
// Reserve disk space to make tikv would have enough space to compact when disk is full.
pub reserve_space: ReadableSize,
#[config(skip)]
pub enable_async_apply_prewrite: bool,
#[config(skip)]
pub enable_ttl: bool,
/// Interval to check TTL for all SSTs,
pub ttl_check_poll_interval: ReadableDuration,
#[config(submodule)]
pub block_cache: BlockCacheConfig,
}
impl Default for Config {
fn | () -> Config {
let cpu_num = SysQuota::new().cpu_cores_quota();
Config {
data_dir: DEFAULT_DATA_DIR.to_owned(),
gc_ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
max_key_size: DEFAULT_MAX_KEY_SIZE,
scheduler_concurrency: DEFAULT_SCHED_CONCURRENCY,
scheduler_worker_pool_size: if cpu_num >= 16.0 { 8 } else { 4 },
scheduler_pending_write_threshold: ReadableSize::mb(DEFAULT_SCHED_PENDING_WRITE_MB),
reserve_space: ReadableSize::gb(DEFAULT_RESERVED_SPACE_GB),
enable_async_apply_prewrite: false,
enable_ttl: false,
ttl_check_poll_interval: ReadableDuration::hours(12),
block_cache: BlockCacheConfig::default(),
}
}
}
impl Config {
pub fn validate(&mut self) -> Result<(), Box<dyn Error>> {
if self.data_dir!= DEFAULT_DATA_DIR {
self.data_dir = config::canonicalize_path(&self.data_dir)?
}
if self.scheduler_concurrency > MAX_SCHED_CONCURRENCY {
warn!(
"TiKV has optimized latch since v4.0, so it is not necessary to set large schedule \
concurrency. To save memory, change it from {:?} to {:?}",
self.scheduler_concurrency, MAX_SCHED_CONCURRENCY
);
self.scheduler_concurrency = MAX_SCHED_CONCURRENCY;
}
Ok(())
}
}
pub struct StorageConfigManger {
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
}
impl StorageConfigManger {
pub fn new(
kvdb: RocksEngine,
shared_block_cache: bool,
ttl_checker_scheduler: Scheduler<TTLCheckerTask>,
) -> StorageConfigManger {
StorageConfigManger {
kvdb,
shared_block_cache,
ttl_checker_scheduler,
}
}
}
impl ConfigManager for StorageConfigManger {
fn dispatch(&mut self, mut change: ConfigChange) -> CfgResult<()> {
if let Some(ConfigValue::Module(mut block_cache)) = change.remove("block_cache") {
if!self.shared_block_cache {
return Err("shared block cache is disabled".into());
}
if let Some(size) = block_cache.remove("capacity") {
let s: OptionReadableSize = size.into();
if let Some(size) = s.0 {
// Hack: since all CFs in both kvdb and raftdb share a block cache, we can change
// the size through any of them. Here we change it through default CF in kvdb.
// A better way to do it is to hold the cache reference somewhere, and use it to
// change cache size.
let opt = self.kvdb.get_options_cf(CF_DEFAULT).unwrap(); // FIXME unwrap
opt.set_block_cache_capacity(size.0)?;
// Write config to metric
CONFIG_ROCKSDB_GAUGE
.with_label_values(&[CF_DEFAULT, "block_cache_size"])
.set(size.0 as f64);
}
}
} else if let Some(v) = change.remove("ttl_check_poll_interval") {
let interval: ReadableDuration = v.into();
self.ttl_checker_scheduler
.schedule(TTLCheckerTask::UpdatePollInterval(interval.into()))
.unwrap();
}
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Configuration)]
#[serde(default)]
#[serde(rename_all = "kebab-case")]
pub struct BlockCacheConfig {
#[config(skip)]
pub shared: bool,
pub capacity: OptionReadableSize,
#[config(skip)]
pub num_shard_bits: i32,
#[config(skip)]
pub strict_capacity_limit: bool,
#[config(skip)]
pub high_pri_pool_ratio: f64,
#[config(skip)]
pub memory_allocator: Option<String>,
}
impl Default for BlockCacheConfig {
fn default() -> BlockCacheConfig {
BlockCacheConfig {
shared: true,
capacity: OptionReadableSize(None),
num_shard_bits: 6,
strict_capacity_limit: false,
high_pri_pool_ratio: 0.8,
memory_allocator: Some(String::from("nodump")),
}
}
}
impl BlockCacheConfig {
pub fn build_shared_cache(&self) -> Option<Cache> {
if!self.shared {
return None;
}
let capacity = match self.capacity.0 {
None => {
let total_mem = SysQuota::new().memory_limit_in_bytes();
((total_mem as f64) * 0.45) as usize
}
Some(c) => c.0 as usize,
};
let mut cache_opts = LRUCacheOptions::new();
cache_opts.set_capacity(capacity);
cache_opts.set_num_shard_bits(self.num_shard_bits as c_int);
cache_opts.set_strict_capacity_limit(self.strict_capacity_limit);
cache_opts.set_high_pri_pool_ratio(self.high_pri_pool_ratio);
if let Some(allocator) = self.new_memory_allocator() {
cache_opts.set_memory_allocator(allocator);
}
Some(Cache::new_lru_cache(cache_opts))
}
fn new_memory_allocator(&self) -> Option<MemoryAllocator> {
if let Some(ref alloc) = self.memory_allocator {
match alloc.as_str() {
#[cfg(feature = "jemalloc")]
"nodump" => match MemoryAllocator::new_jemalloc_memory_allocator() {
Ok(allocator) => {
return Some(allocator);
}
Err(e) => {
warn!(
"Create jemalloc nodump allocator for block cache failed: {}, continue with default allocator",
e
);
}
},
"" => {}
other => {
warn!(
"Memory allocator {} is not supported, continue with default allocator",
other
);
}
}
};
None
}
}
| default | identifier_name |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, &cwd, &cwd);
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session, cfg.interface);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session, cfg.interface);
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if!v.is_empty() {
let f = Path::new(v[0]);
if!path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if!path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if path_exists(f) {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments.
}
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") |
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned),..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version("v1.1.0")
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() {
// make sure we get a stack trace ifwe panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("text") => Interface::Text,
Some("tab-text") => Interface::TabText,
_ => Interface::Text,
};
run(matches, interface);
}
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
}
| {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if !m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize), .. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize), .. cfg };
} | conditional_block |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn | (cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, &cwd, &cwd);
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session, cfg.interface);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session, cfg.interface);
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if!v.is_empty() {
let f = Path::new(v[0]);
if!path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if!path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if path_exists(f) {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments.
}
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if!m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize),.. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize),.. cfg };
}
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned),..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version("v1.1.0")
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() {
// make sure we get a stack trace ifwe panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("text") => Interface::Text,
Some("tab-text") => Interface::TabText,
_ => Interface::Text,
};
run(matches, interface);
}
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
}
| run_the_complete_fn | identifier_name |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
// Print one completion match in the verbose format used by the
// `complete-with-snippet` subcommand: semicolon-separated fields in `Text`
// mode, tab-separated fields in `TabText` mode.
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
    // Translate the match's byte offset back into (line, column).
    // NOTE(review): this unwraps on failure, whereas `match_fn` logs an
    // error instead — confirm whether the asymmetry is intentional.
    let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
    if m.matchstr == "" {
        panic!("MATCHSTR is empty - waddup?");
    }
    let snippet = racer::snippets::snippet_for_match(&m, session);
    match interface {
        Interface::Text =>
            println!("MATCH {};{};{};{};{};{:?};{}",
                     m.matchstr,
                     snippet,
                     linenum.to_string(),
                     charnum.to_string(),
                     m.filepath.to_str().unwrap(),
                     m.mtype,
                     m.contextstr),
        Interface::TabText =>
            println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
                     m.matchstr,
                     snippet,
                     linenum.to_string(),
                     charnum.to_string(),
                     m.filepath.to_str().unwrap(),
                     m.mtype,
                     m.contextstr),
    }
}
#[cfg(not(test))]
// Print one completion match in the plain `MATCH` format (no snippet):
// comma-separated fields in `Text` mode, tab-separated in `TabText` mode.
// Unresolvable coordinates are logged rather than panicking.
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
    if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
                                                                        m.point,
                                                                        session) {
        match interface {
            Interface::Text =>
                println!("MATCH {},{},{},{},{:?},{}",
                         m.matchstr,
                         linenum.to_string(),
                         charnum.to_string(),
                         m.filepath.to_str().unwrap(),
                         m.mtype,
                         m.contextstr),
            Interface::TabText =>
                println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
                         m.matchstr,
                         linenum.to_string(),
                         charnum.to_string(),
                         m.filepath.to_str().unwrap(),
                         m.mtype,
                         m.contextstr),
        }
    } else {
        error!("Could not resolve file coords for match {:?}", m);
    }
}
// Entry point for the `complete` subcommands: route FQN-style queries to the
// external search path, and line/column queries to coordinate-based
// completion.
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
    if cfg.fqn.is_none() {
        complete_by_line_coords(cfg, print_type);
    } else {
        external_complete(cfg);
    }
}
#[cfg(not(test))]
// Run coordinate-based completion on a dedicated thread so a panic inside the
// search code is caught (via `join`) instead of taking the process down.
fn complete_by_line_coords(cfg: Config,
                           print_type: CompletePrinter) {
    // input: linenum, colnum, fname
    let tb = std::thread::Builder::new().name("searcher".to_string());
    // PD: this probably sucks for performance, but lots of plugins
    // end up failing and leaving tmp files around if racer crashes,
    // so catch the crash.
    let res = tb.spawn(move || {
        run_the_complete_fn(&cfg, print_type);
    }).unwrap();
    // A panic on the search thread surfaces here as Err.
    if let Err(e) = res.join() {
        error!("Search thread paniced: {:?}", e);
    }
    // End-of-output sentinel expected by editor plugins, printed even on panic.
    println!("END");
}
#[cfg(not(test))]
/// Selects how completion matches are printed.
enum CompletePrinter {
    /// Plain `MATCH` lines (see `match_fn`).
    Normal,
    /// `MATCH` lines including a code snippet (see `match_with_snippet_fn`).
    WithSnippets
}
#[cfg(not(test))]
// The body of a coordinate-based completion request: load the source (or its
// substitute copy), report the identifier prefix under the cursor, then print
// every match found at the requested point.
fn run_the_complete_fn(cfg: &Config, print_type: CompletePrinter) {
    let fn_path = &*cfg.fn_name.as_ref().unwrap();
    // Read from the substitute file when one was supplied, else the real file.
    let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
    let cache = core::FileCache::new();
    let session = core::Session::from_path(&cache, fn_path, substitute_file);
    let src = session.load_file(fn_path);
    let line = &getline(substitute_file, cfg.linenum, &session);
    // Expand the cursor column to the full identifier span [start, pos).
    let (start, pos) = util::expand_ident(line, cfg.charnum);
    match cfg.interface {
        Interface::Text =>
            println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
        Interface::TabText =>
            println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
    }
    let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
    for m in core::complete_from_file(&src, fn_path, point, &session) {
        match print_type {
            CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
            CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
        };
    }
}
#[cfg(not(test))]
// Complete a fully-qualified name (e.g. `std::io::`) relative to the current
// directory: the first path segment is resolved with a file search, any
// remaining segments with an external (cross-module) search.
fn external_complete(cfg: Config) {
    // input: a command line string passed in
    // `split` always yields at least one element, so p[0] is safe.
    let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
    let cwd = Path::new(".");
    let cache = core::FileCache::new();
    let session = core::Session::from_path(&cache, &cwd, &cwd);
    for m in do_file_search(p[0], &Path::new(".")) {
        if p.len() == 1 {
            match_fn(m, &session, cfg.interface);
        } else {
            for m in do_external_search(&p[1..], &m.filepath, m.point,
                                        core::SearchType::StartsWith,
                                        core::Namespace::BothNamespaces, &session) {
                match_fn(m, &session, cfg.interface);
            }
        }
    }
}
#[cfg(not(test))]
// The `prefix` subcommand: report the span and text of the identifier under
// the cursor without performing any completion.
fn prefix(cfg: Config) {
    let fn_path = &*cfg.fn_name.as_ref().unwrap();
    let cache = core::FileCache::new();
    let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
    // print the start, end, and the identifier prefix being matched
    let line = &getline(fn_path, cfg.linenum, &session);
    let (start, pos) = util::expand_ident(line, cfg.charnum);
    match cfg.interface {
        Interface::Text =>
            println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
        Interface::TabText =>
            println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
    }
}
#[cfg(not(test))]
// The `find-definition` subcommand: resolve the symbol at the given
// coordinates and print at most one `MATCH` line, followed by `END`.
fn find_definition(cfg: Config) {
    let fn_path = &*cfg.fn_name.as_ref().unwrap();
    let cache = core::FileCache::new();
    let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
    let src = session.load_file(fn_path);
    let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
    // `find_definition` returns an Option; print only when a match was found.
    core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
    println!("END");
}
#[cfg(not(test))]
// Validate RUST_SRC_PATH or, when it is unset, probe a couple of conventional
// checkout locations. Exits the process with status 1 if no usable rust `src`
// directory can be found, since racer cannot operate without one.
fn check_rust_src_env_var() {
    if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
        // The variable may hold several paths; only the first is sanity-checked.
        let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
        if!v.is_empty() {
            let f = Path::new(v[0]);
            if!path_exists(f) {
                println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
                std::process::exit(1);
            } else if!path_exists(f.join("libstd")) {
                // A valid src dir must contain libstd; catches pointing at the
                // checkout root instead of its src/ subdirectory.
                println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
                std::process::exit(1);
            }
        }
    } else {
        // No env var set: adopt the first well-known location that exists.
        let default_paths = [
            "/usr/local/src/rust/src",
            "/usr/src/rust/src",
        ];
        for &path in &default_paths {
            let f = Path::new(path);
            if path_exists(f) {
                std::env::set_var("RUST_SRC_PATH", path);
                return;
            }
        }
        println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
        std::process::exit(1);
    }
}
#[cfg(not(test))]
// The `daemon` subcommand: read commands from stdin, one per line, and
// dispatch each through the same clap parser as the command line. A blank
// line shuts the daemon down.
// NOTE(review): read_line returns Ok(0) at EOF, which does not hit the
// `n == 1` break below — confirm clients always send a blank line to quit.
fn daemon(cfg: Config) {
    use std::io;
    let mut input = String::new();
    while let Ok(n) = io::stdin().read_line(&mut input) {
        // A lone '\n' (length 1) means a blank line: shut down.
        if n == 1 {
            break;
        }
        // We add the setting NoBinaryName because in daemon mode we won't be passed the preceding
        // binary name
        let cli = build_cli().setting(AppSettings::NoBinaryName);
        // Text mode splits on whitespace; TabText splits on tabs so that
        // individual arguments may contain spaces.
        let matches = match cfg.interface {
            Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
            Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
        };
        run(matches, cfg.interface);
        // read_line appends, so clear the buffer before the next command.
        input.clear();
    }
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
// Output/interface mode, selected with `--interface` and threaded through to
// every printing function.
enum Interface {
    Text, // The original human-readable format.
    TabText, // Machine-readable format. This is basically the same as Text, except that all field
             // separators are replaced with tabs.
             // In `daemon` mode tabs are also used to delimit command arguments.
}
#[cfg(not(test))]
impl Default for Interface {
    /// The human-readable `Text` mode is the default interface.
    fn default() -> Interface {
        Interface::Text
    }
}
#[cfg(not(test))]
#[derive(Default)]
// Parsed arguments for one subcommand invocation.
struct Config {
    fqn: Option<String>,              // fully-qualified-name query, when one was given
    linenum: usize,                   // cursor line (coordinate-based queries)
    charnum: usize,                   // cursor column (coordinate-based queries)
    fn_name: Option<PathBuf>,         // file being completed/searched
    substitute_file: Option<PathBuf>, // optional file read in place of fn_name
    interface: Interface,             // output mode; assigned by `run` after parsing
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
    /// Build a `Config` from a subcommand's matches, untangling the positional
    /// argument that serves as both `fqn` and `linenum` (see `build_cli`).
    fn from(m: &'a ArgMatches) -> Self {
        // We check for charnum because it's the second argument, which means more than just
        // an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
        if m.is_present("charnum") {
            let cfg = Config {
                charnum: value_t_or_exit!(m.value_of("charnum"), usize),
                fn_name: m.value_of("path").map(PathBuf::from),
                substitute_file: m.value_of("substitute_file").map(PathBuf::from),
                ..Default::default()
            };
            if!m.is_present("linenum") {
                // Because of the hack to allow fqn and linenum to share a single arg we leave FQN
                // as None and set the linenum from the value captured in the FQN slot, so there's
                // no hackery later
                return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize),.. cfg };
            }
            return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize),.. cfg };
        }
        // FQN-only form: no coordinates at all.
        Config {fqn: m.value_of("fqn").map(ToOwned::to_owned),..Default::default() }
    }
}
#[cfg(not(test))]
// Construct the clap CLI definition. Shared by `main` and by `daemon`, which
// re-parses each stdin line through the same App (with NoBinaryName added).
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
    // we use the more verbose "Builder Pattern" to create the CLI because it's a little faster
    // than the less verbose "Usage String" method...faster, meaning runtime speed since that's
    // extremely important here
    App::new("racer")
        .version("v1.1.0")
        .author("Phil Dawes")
        .about("A Rust code completion utility")
        .settings(&[AppSettings::GlobalVersion,
                    AppSettings::SubcommandRequiredElseHelp])
        .arg(Arg::with_name("interface")
            .long("interface")
            .short("i")
            .takes_value(true)
            .possible_value("text")
            .possible_value("tab-text")
            .value_name("mode")
            .help("Interface mode"))
        .subcommand(SubCommand::with_name("complete")
            .about("performs completion and returns matches")
            // We set an explicit usage string here, instead of letting `clap` write one due to
            // using a single arg for multiple purposes
            .usage("racer complete <fqn>\n\t\
                    racer complete <linenum> <charnum> <path> [substitute_file]")
            // Next we make it an error to run without any args
            .setting(AppSettings::ArgRequiredElseHelp)
            // Because we want a single arg to play two roles and be compatible with previous
            // racer releases, we have to be a little hacky here...
            //
            // We start by making 'fqn' the first positional arg, which will hold this dual value
            // of either an FQN as it says, or secretly a line-number
            .arg(Arg::with_name("fqn")
                .help("complete with a fully-qualified-name (e.g. std::io::)"))
            .arg(Arg::with_name("charnum")
                .help("The char number to search for matches")
                .requires("path"))
            .arg(Arg::with_name("path")
                .help("The path to search for name to match"))
            .arg(Arg::with_name("substitute_file")
                .help("An optional substitute file"))
            // 'linenum' **MUST** be last (or have the highest index so that it's never actually
            // used by the user, but still appears in the help text)
            .arg(Arg::with_name("linenum")
                .help("The line number at which to find the match")))
        .subcommand(SubCommand::with_name("daemon")
            .about("start a process that receives the above commands via stdin"))
        .subcommand(SubCommand::with_name("find-definition")
            .about("finds the definition of a function")
            .arg(Arg::with_name("linenum")
                .help("The line number at which to find the match")
                .required(true))
            .arg(Arg::with_name("charnum")
                .help("The char number at which to find the match")
                .required(true))
            .arg(Arg::with_name("path")
                .help("The path to search for name to match")
                .required(true))
            .arg(Arg::with_name("substitute_file")
                .help("An optional substitute file")))
        .subcommand(SubCommand::with_name("prefix")
            .arg(Arg::with_name("linenum")
                .help("The line number at which to find the match")
                .required(true))
            .arg(Arg::with_name("charnum")
                .help("The char number at which to find the match")
                .required(true))
            .arg(Arg::with_name("path")
                .help("The path to search for the match to prefix")
                .required(true)))
        .subcommand(SubCommand::with_name("complete-with-snippet")
            .about("performs completion and returns more detailed matches")
            // Same fqn/linenum dual-role hack as the `complete` subcommand above.
            .usage("racer complete-with-snippet <fqn>\n\t\
                    racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
            .setting(AppSettings::ArgRequiredElseHelp)
            .arg(Arg::with_name("fqn")
                .help("complete with a fully-qualified-name (e.g. std::io::)"))
            .arg(Arg::with_name("charnum")
                .help("The char number to search for matches")
                .requires("path"))
            .arg(Arg::with_name("path")
                .help("The path to search for name to match"))
            .arg(Arg::with_name("substitute_file")
                .help("An optional substitute file"))
            .arg(Arg::with_name("linenum")
                .help("The line number at which to find the match")))
        .after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() |
#[cfg(not(test))]
// Dispatch a parsed command line to the matching subcommand handler.
fn run(m: ArgMatches, interface: Interface) {
    use CompletePrinter::{Normal, WithSnippets};
    // match raw subcommand, and get its sub-matches "m"
    if let (name, Some(sub_m)) = m.subcommand() {
        let mut cfg = Config::from(sub_m);
        cfg.interface = interface;
        match name {
            "daemon" => daemon(cfg),
            "prefix" => prefix(cfg),
            "complete" => complete(cfg, Normal),
            "complete-with-snippet" => complete(cfg, WithSnippets),
            "find-definition" => find_definition(cfg),
            // build_cli sets SubcommandRequiredElseHelp, so one of the
            // subcommands above must have matched.
            _ => unreachable!()
        }
    }
}
| {
// make sure we get a stack trace ifwe panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("text") => Interface::Text,
Some("tab-text") => Interface::TabText,
_ => Interface::Text,
};
run(matches, interface);
} | identifier_body |
main.rs | #![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, &cwd, &cwd);
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session, cfg.interface);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session, cfg.interface);
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, cfg.substitute_file.as_ref().unwrap_or(fn_path));
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if!v.is_empty() {
let f = Path::new(v[0]);
if!path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if!path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if path_exists(f) {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments. | }
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a, 'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if!m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize),.. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize),.. cfg };
}
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned),..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b, 'c, 'd, 'e, 'f>() -> App<'a, 'b, 'c, 'd, 'e, 'f> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version("v1.1.0")
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() {
    // make sure we get a stack trace if we panic
    ::std::env::set_var("RUST_BACKTRACE","1");
    env_logger::init().unwrap();
    check_rust_src_env_var();
    let matches = build_cli().get_matches();
    // Fall back to the plain-text interface for a missing/unknown mode.
    let interface = match matches.value_of("interface") {
        Some("text") => Interface::Text,
        Some("tab-text") => Interface::TabText,
        _ => Interface::Text,
    };
    run(matches, interface);
}
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
} | random_line_split |
|
mod.rs | //! Provides dedicated `system` pipelines inside OrbTk.
//!
//! System pipelines are modules, that handle specific tasks when
//! iteratively walking the widget tree. Because each widget
//! implements the `state` trait, all system modules are accessible.
//! Pipelines are connected in a logical order. E.g. the `InitSystem`
//! is triggered **before** the `LayoutSystem`. The `LayoutSystem` is
//! triggerd **before** the `RenderSystem`. Handling of widget objects
//! inside the pipelines rely on the Entity Component System
//! ([`DCES`]).
//!
//! [`DCES`]: https://gitlab.redox-os.org/redox-os/dces-rust
pub use self::cleanup_system::*;
pub use self::event_state_system::*;
pub use self::init_system::*;
pub use self::layout_system::*;
pub use self::post_layout_state_system::*;
pub use self::render_system::*; | mod init_system;
mod layout_system;
mod post_layout_state_system;
mod render_system; |
mod cleanup_system;
mod event_state_system; | random_line_split |
session.rs | //! This exposes `Session`, the struct stored in the `Alloy`.
use std::sync::Arc;
use super::SessionStore;
/// A session which provides basic CRUD operations. | key: K,
store: Arc<Box<SessionStore<K, V> +'static + Send + Sync>>
}
impl<K, V> Session<K, V> {
    /// Create a new session for `key`, backed by the given store.
    pub fn new(key: K, store: Box<SessionStore<K, V> +'static + Send + Sync>) -> Session<K, V> {
        Session {
            key: key,
            // Arc lets the store handle be shared across clones of the session.
            store: Arc::new(store)
        }
    }
    /// Set the value of this session, replacing any previously set value.
    pub fn insert(&self, value: V) {
        self.store.insert(&self.key, value)
    }
    /// Retrieve the value of this session.
    ///
    /// Returns `None` if this session has not been set.
    pub fn find(&self) -> Option<V> {
        self.store.find(&self.key)
    }
    /// Swap the given value with the current value of this session.
    ///
    /// Returns the value being replaced.
    /// Returns `None` if this session was not yet set.
    pub fn swap(&self, value: V) -> Option<V> {
        self.store.swap(&self.key, value)
    }
    /// Insert value, if not yet set, or update the current value of this session
    /// by applying `mutator` to it.
    ///
    /// Returns an owned copy of the set (current) value of this session.
    ///
    /// This is analagous to the `insert_or_update_with` method of `HashMap`.
    pub fn upsert(&self, value: V, mutator: |&mut V|) -> V {
        self.store.upsert(&self.key, value, mutator)
    }
    /// Remove the session stored at this key.
    ///
    /// Returns whether a value was actually removed (per `SessionStore::remove`).
    pub fn remove(&self) -> bool {
        self.store.remove(&self.key)
    }
}
session.rs | //! This exposes `Session`, the struct stored in the `Alloy`.
use std::sync::Arc;
use super::SessionStore;
/// A session which provides basic CRUD operations.
pub struct Session<K, V> {
    key: K,                                              // key identifying this session in the store
    store: Arc<Box<SessionStore<K, V> +'static + Send + Sync>> // shared handle to the backing store
}
impl<K, V> Session<K, V> {
/// Create a new session
pub fn new(key: K, store: Box<SessionStore<K, V> +'static + Send + Sync>) -> Session<K, V> {
Session {
key: key,
store: Arc::new(store)
}
}
/// Set the value of this session, replacing any previously set value.
pub fn insert(&self, value: V) {
self.store.insert(&self.key, value)
}
/// Retrieve the value of this session.
///
/// Returns `None` if this session has not been set.
pub fn find(&self) -> Option<V> {
self.store.find(&self.key)
}
/// Swap the given value with the current value of this session.
///
/// Returns the value being replaced.
/// Returns `None` if this session was not yet set.
pub fn | (&self, value: V) -> Option<V> {
self.store.swap(&self.key, value)
}
/// Insert value, if not yet set, or update the current value of this session.
///
/// Returns an owned copy of the set (current) value of this session.
///
/// This is analagous to the `insert_or_update_with` method of `HashMap`.
pub fn upsert(&self, value: V, mutator: |&mut V|) -> V {
self.store.upsert(&self.key, value, mutator)
}
/// Remove the session stored at this key.
pub fn remove(&self) -> bool {
self.store.remove(&self.key)
}
}
| swap | identifier_name |
session.rs | //! This exposes `Session`, the struct stored in the `Alloy`.
use std::sync::Arc;
use super::SessionStore;
/// A session which provides basic CRUD operations.
pub struct Session<K, V> {
key: K,
store: Arc<Box<SessionStore<K, V> +'static + Send + Sync>>
}
impl<K, V> Session<K, V> {
/// Create a new session
pub fn new(key: K, store: Box<SessionStore<K, V> +'static + Send + Sync>) -> Session<K, V> {
Session {
key: key,
store: Arc::new(store)
}
}
/// Set the value of this session, replacing any previously set value.
pub fn insert(&self, value: V) {
self.store.insert(&self.key, value)
}
/// Retrieve the value of this session.
///
/// Returns `None` if this session has not been set.
pub fn find(&self) -> Option<V> |
/// Swap the given value with the current value of this session.
///
/// Returns the value being replaced.
/// Returns `None` if this session was not yet set.
pub fn swap(&self, value: V) -> Option<V> {
self.store.swap(&self.key, value)
}
/// Insert value, if not yet set, or update the current value of this session.
///
/// Returns an owned copy of the set (current) value of this session.
///
/// This is analagous to the `insert_or_update_with` method of `HashMap`.
pub fn upsert(&self, value: V, mutator: |&mut V|) -> V {
self.store.upsert(&self.key, value, mutator)
}
/// Remove the session stored at this key.
pub fn remove(&self) -> bool {
self.store.remove(&self.key)
}
}
| {
self.store.find(&self.key)
} | identifier_body |
poly.rs | // Copyright (C) 2020 Inderjit Gill <[email protected]>
// This file is part of Seni
// Seni is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Seni is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::error::{Error, Result};
use crate::matrix::Matrix;
use crate::render_list::RenderList;
use crate::rgb::Rgb;
use crate::uvmapper::UvMapping;
use log::error;
pub fn | (
render_list: &mut RenderList,
matrix: &Matrix,
coords: &[(f32, f32)],
colours: &[Rgb],
uvm: &UvMapping,
) -> Result<()> {
let num_vertices = coords.len();
if colours.len()!= num_vertices {
error!("render_poly: coords and colours length mismatch");
return Err(Error::Geometry);
} else if num_vertices < 3 {
return Ok(());
}
let (x, y) = coords[0];
render_list.prepare_to_add_triangle_strip(matrix, num_vertices, x, y)?;
let rp = render_list
.render_packets
.last_mut()
.ok_or(Error::Geometry)?;
let rpg = rp.get_mut_render_packet_geometry()?;
for i in 0..num_vertices {
let (x, y) = coords[i];
rpg.add_vertex(matrix, x, y, &colours[i], uvm.map[4], uvm.map[5])
}
Ok(())
}
| render | identifier_name |
poly.rs | // Copyright (C) 2020 Inderjit Gill <[email protected]>
// This file is part of Seni
// Seni is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Seni is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::error::{Error, Result};
use crate::matrix::Matrix;
use crate::render_list::RenderList;
use crate::rgb::Rgb;
use crate::uvmapper::UvMapping;
use log::error;
pub fn render(
render_list: &mut RenderList,
matrix: &Matrix,
coords: &[(f32, f32)],
colours: &[Rgb],
uvm: &UvMapping,
) -> Result<()> | rpg.add_vertex(matrix, x, y, &colours[i], uvm.map[4], uvm.map[5])
}
Ok(())
}
| {
let num_vertices = coords.len();
if colours.len() != num_vertices {
error!("render_poly: coords and colours length mismatch");
return Err(Error::Geometry);
} else if num_vertices < 3 {
return Ok(());
}
let (x, y) = coords[0];
render_list.prepare_to_add_triangle_strip(matrix, num_vertices, x, y)?;
let rp = render_list
.render_packets
.last_mut()
.ok_or(Error::Geometry)?;
let rpg = rp.get_mut_render_packet_geometry()?;
for i in 0..num_vertices {
let (x, y) = coords[i]; | identifier_body |
poly.rs | // Copyright (C) 2020 Inderjit Gill <[email protected]>
// This file is part of Seni
// Seni is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Seni is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
| use crate::render_list::RenderList;
use crate::rgb::Rgb;
use crate::uvmapper::UvMapping;
use log::error;
pub fn render(
render_list: &mut RenderList,
matrix: &Matrix,
coords: &[(f32, f32)],
colours: &[Rgb],
uvm: &UvMapping,
) -> Result<()> {
let num_vertices = coords.len();
if colours.len()!= num_vertices {
error!("render_poly: coords and colours length mismatch");
return Err(Error::Geometry);
} else if num_vertices < 3 {
return Ok(());
}
let (x, y) = coords[0];
render_list.prepare_to_add_triangle_strip(matrix, num_vertices, x, y)?;
let rp = render_list
.render_packets
.last_mut()
.ok_or(Error::Geometry)?;
let rpg = rp.get_mut_render_packet_geometry()?;
for i in 0..num_vertices {
let (x, y) = coords[i];
rpg.add_vertex(matrix, x, y, &colours[i], uvm.map[4], uvm.map[5])
}
Ok(())
} | use crate::error::{Error, Result};
use crate::matrix::Matrix; | random_line_split |
sph_hamsi_test.rs | extern crate sphlib;
extern crate libc;
use sphlib::{sph_hamsi, utils};
#[test]
fn will_be_224_hash() {
let dest = sph_hamsi::hamsi224_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("b9f6eb1a9b990373f9d2cb125584333c69a3d41ae291845f05da221f", actual.to_string());
}
#[test]
fn will_be_256_hash() |
#[test]
fn will_be_384_hash() {
let dest = sph_hamsi::hamsi384_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("3943cd34e3b96b197a8bf4bac7aa982d18530dd12f41136b26d7e88759255f21153f4a4bd02e523612b8427f9dd96c8d", actual.to_string());
}
#[test]
fn will_be_512_hash() {
let dest = sph_hamsi::hamsi512_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("5cd7436a91e27fc809d7015c3407540633dab391127113ce6ba360f0c1e35f404510834a551610d6e871e75651ea381a8ba628af1dcf2b2be13af2eb6247290f", actual.to_string());
}
| {
let dest = sph_hamsi::hamsi256_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("750e9ec469f4db626bee7e0c10ddaa1bd01fe194b94efbabebd24764dc2b13e9", actual.to_string());
} | identifier_body |
sph_hamsi_test.rs | extern crate sphlib;
extern crate libc;
use sphlib::{sph_hamsi, utils};
#[test]
fn | () {
let dest = sph_hamsi::hamsi224_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("b9f6eb1a9b990373f9d2cb125584333c69a3d41ae291845f05da221f", actual.to_string());
}
#[test]
fn will_be_256_hash() {
let dest = sph_hamsi::hamsi256_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("750e9ec469f4db626bee7e0c10ddaa1bd01fe194b94efbabebd24764dc2b13e9", actual.to_string());
}
#[test]
fn will_be_384_hash() {
let dest = sph_hamsi::hamsi384_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("3943cd34e3b96b197a8bf4bac7aa982d18530dd12f41136b26d7e88759255f21153f4a4bd02e523612b8427f9dd96c8d", actual.to_string());
}
#[test]
fn will_be_512_hash() {
let dest = sph_hamsi::hamsi512_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("5cd7436a91e27fc809d7015c3407540633dab391127113ce6ba360f0c1e35f404510834a551610d6e871e75651ea381a8ba628af1dcf2b2be13af2eb6247290f", actual.to_string());
}
| will_be_224_hash | identifier_name |
sph_hamsi_test.rs | extern crate sphlib;
extern crate libc;
use sphlib::{sph_hamsi, utils};
#[test]
fn will_be_224_hash() {
let dest = sph_hamsi::hamsi224_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("b9f6eb1a9b990373f9d2cb125584333c69a3d41ae291845f05da221f", actual.to_string());
}
#[test]
fn will_be_256_hash() {
let dest = sph_hamsi::hamsi256_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("750e9ec469f4db626bee7e0c10ddaa1bd01fe194b94efbabebd24764dc2b13e9", actual.to_string());
}
| }
#[test]
fn will_be_512_hash() {
let dest = sph_hamsi::hamsi512_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("5cd7436a91e27fc809d7015c3407540633dab391127113ce6ba360f0c1e35f404510834a551610d6e871e75651ea381a8ba628af1dcf2b2be13af2eb6247290f", actual.to_string());
} | #[test]
fn will_be_384_hash() {
let dest = sph_hamsi::hamsi384_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("3943cd34e3b96b197a8bf4bac7aa982d18530dd12f41136b26d7e88759255f21153f4a4bd02e523612b8427f9dd96c8d", actual.to_string()); | random_line_split |
match.rs | nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn |
Paternnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn => {
blah
}
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn => meh,
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn if looooooooooooooooooong_guard => meh,
Patternnnnnnnnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnnnnnnnn if looooooooooooooooooooooooooooooooooooooooong_guard =>
meh,
// Test that earlier patterns can take the guard space
(aaaa, bbbbb, ccccccc, aaaaa, bbbbbbbb, cccccc, aaaa, bbbbbbbb, cccccc, dddddd) |
Patternnnnnnnnnnnnnnnnnnnnnnnnn if loooooooooooooooooooooooooooooooooooooooooong_guard => {}
_ => {}
ast::PathParameters::AngleBracketedParameters(ref data) if data.lifetimes.len() > 0 ||
data.types.len() > 0 ||
data.bindings.len() > 0 => {}
}
let whatever = match something {
/// DOC COMMENT!
Some(_) => 42,
// Comment on an attribute.
#[an_attribute]
// Comment after an attribute.
None => 0,
#[rustfmt::skip]
Blurb => { }
};
}
// Test that a match on an overflow line is laid out properly.
fn main() {
let sub_span =
match xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx {
Some(sub_span) => Some(sub_span),
None => sub_span,
};
}
// Test that one-line bodies align.
fn main() {
match r {
Variableeeeeeeeeeeeeeeeee => ( "variable",
vec!("id", "name", "qualname",
"value", "type", "scopeid"),
true,
true),
Enummmmmmmmmmmmmmmmmmmmm => ("enum",
vec!("id","qualname","scopeid","value"),
true,
true),
Variantttttttttttttttttttttttt => ("variant",
vec!("id",
"name",
"qualname",
"type",
"value",
"scopeid"),
true,
true),
};
match x{
y=>{/*Block with comment. Preserve me.*/ }
z=>{stmt();} }
}
fn matches() {
match 1 {
-1 => 10,
1 => 1, // foo
2 => 2,
// bar
3 => 3,
_ => 0 // baz
}
}
fn match_skip() {
let _ = match Some(1) {
#[rustfmt::skip]
Some( n ) => n,
None => 1,
};
}
fn issue339() {
match a {
b => {}
c => { }
d => {
}
e => {
}
// collapsing here is safe
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff => {
}
// collapsing here exceeds line length
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffg => {
}
h => { // comment above block
}
i => {
} // comment below block
j => {
// comment inside block
}
j2 => {
// comments inside...
} //... and after
// TODO uncomment when vertical whitespace is handled better
// k => {
//
// // comment with WS above
// }
// l => {
// // comment with ws below
//
// }
m => {
} n => { } o =>
{
}
p => { // Don't collapse me
} q => { } r =>
{
}
s => 0, // s comment
// t comment
t => 1,
u => 2,
v => {
} /* funky block
* comment */
// final comment
}
}
fn issue355() {
match mac {
a => println!("a", b),
b => vec!(1, 2),
c => vec!(3; 4),
d => {
println!("a", b)
}
e => {
vec!(1, 2)
}
f => {
vec!(3; 4)
}
h => println!("a", b), // h comment
i => vec!(1, 2), // i comment
j => vec!(3; 4), // j comment
// k comment
k => println!("a", b),
// l comment
l => vec!(1, 2),
// m comment
m => vec!(3; 4),
// Rewrite splits macro
nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn => println!("a", b),
// Rewrite splits macro
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo => vec!(1, 2),
// Macro support fails to recognise this macro as splittable
// We push the whole expr to a new line, TODO split this macro as well
pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp => vec!(3; 4),
// q, r and s: Rewrite splits match arm
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq => println!("a", b),
rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr => vec!(1, 2),
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss => vec!(3; 4),
// Funky bracketing styles
t => println!{"a", b},
u => vec!{1, 2},
v => vec!{3; 4},
w => println!["a", b],
x => vec![1, 2],
y =>vec![3; 4],
// Brackets with comments
tc => println!{"a", b}, // comment
uc => vec!{1, 2}, // comment
vc =>vec!{3; 4}, // comment
wc =>println!["a", b], // comment
xc => vec![1,2], // comment
yc => vec![3; 4], // comment
yd =>
looooooooooooooooooooooooooooooooooooooooooooooooooooooooong_func(aaaaaaaaaa,
bbbbbbbbbb,
cccccccccc,
dddddddddd),
}
}
fn issue280() {
{
match x {
CompressionMode::DiscardNewline | CompressionMode::CompressWhitespaceNewline => ch ==
'\n',
ast::ItemConst(ref typ, ref expr) => self.process_static_or_const_item(item,
&typ,
&expr),
}
}
}
fn issue383() {
match resolution.last_private {LastImport{..} => false, _ => true};
}
fn issue507() {
match 1 {
1 => unsafe { std::intrinsics::abort() },
_ => (),
}
}
fn issue508() {
match s.type_id() {
Some(NodeTypeId::Element(ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLCanvasElement))) => true,
Some(NodeTypeId::Element(ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLObjectElement))) => s.has_object_data(),
Some(NodeTypeId::Element(_)) => false,
}
}
fn issue496() {{{{
match def {
def::DefConst(def_id) | def::DefAssociatedConst(def_id) =>
match const_eval::lookup_const_by_id(cx.tcx, def_id, Some(self.pat.id)) {
Some(const_expr) => { x }}}}}}}
fn issue494() {
{
match stmt.node {
hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) =>
result.push(
StmtRef::Mirror(
Box::new(Stmt { span: stmt.span,
kind: StmtKind::Expr {
scope: cx.tcx.region_maps.node_extent(id),
expr: expr.to_ref() } }))),
}
}
}
fn issue386() {
match foo {
BiEq | BiLt | BiLe | BiNe | BiGt | BiGe =>
true,
BiAnd | BiOr | BiAdd | BiSub | BiMul | BiDiv | BiRem |
BiBitXor | BiBitAnd | BiBitOr | BiShl | BiShr =>
false,
}
}
fn guards() {
match foo {
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa if foooooooooooooo && barrrrrrrrrrrr => {}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa if foooooooooooooo && barrrrrrrrrrrr => {}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
if fooooooooooooooooooooo &&
(bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb || cccccccccccccccccccccccccccccccccccccccc) => {}
}
}
fn issue1371() | }
sfEvtKeyPressed => {
let e = unsafe { event.key.as_ref() };
KeyPressed {
code: unsafe { ::std::mem::transmute(e.code) },
alt: e.alt.to_bool(),
ctrl: e.control.to_bool(),
shift: e.shift.to_bool(),
system: e.system.to_bool(),
}
}
sfEvtKeyReleased => {
let e = unsafe { event.key.as_ref() };
KeyReleased {
code: unsafe { ::std::mem::transmute(e.code) },
alt: e.alt.to_bool(),
ctrl: e.control.to_bool(),
shift: e.shift.to_bool(),
system: e.system.to_bool(),
}
}
})
}
fn issue1395() {
let bar = Some(true);
let foo = Some(true);
let mut x = false;
bar.and_then(|_| {
match foo {
None => None,
Some(b) => {
x = true;
Some(b)
}
}
});
}
fn issue1456() {
Ok(Recording {
artists: match reader.evaluate(".//mb:recording/mb:artist-credit/mb:name-credit")? {
Nodeset(nodeset) => {
let res: Result<Vec<ArtistRef>, ReadError> = nodeset
.iter()
.map(|node| {
XPathNodeReader::new(node, &context).and_then(|r| ArtistRef::from_xml(&r))
})
.collect();
res?
}
_ => Vec::new(),
},
})
}
fn issue1460() {
let _ = match foo {
REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT => "internal_spec_insert_internal_spec_insert_internal_spec_insert",
_ => "reorder_something",
};
}
fn issue525() {
foobar(f, "{}", match *self {
TaskState::Started => "started",
TaskState::Success => "success",
TaskState::Failed => "failed",
});
}
// #1838, #1839
fn match_with_near_max_width() {
let (this_line_uses_99_characters_and_is_formatted_properly, x012345) = match some_expression {
_ => unimplemented!(),
};
let (should_be_formatted_like_the_line_above_using_100_characters, x0) = match some_expression {
_ => unimplemented!(),
};
let (should_put_the_brace_on_the_next_line_using_101_characters, x0000) = match some_expression
{
_ => unimplemented!(),
};
match m {
Variant::Tag | Variant::Tag2 | Variant::Tag3 | Variant::Tag4 | Variant::Tag5 | Variant::Tag6 =>
{}
}
}
fn match_with_trailing_spaces() {
match x {
Some(..) => 0,
None => 1,
}
}
fn issue_2099() {
let a = match x {
};
let b = match x {
};
match x {}
}
// #2021
impl<'tcx> Const<'tcx> {
pub fn from_constval<'a>() -> Const<'tcx> {
let val = match *cv {
ConstVal::Variant(_) | ConstVal::Aggregate(..) | ConstVal::Unevaluated(..) => bug!("MIR must not use `{:?}` (aggregates are expanded to MIR rvalues)", cv),
};
}
}
// #2151
fn issue_2151() {
match either {
x => {
}y => ()
}
}
// #2152
fn issue_2152() {
match m {
"aaaaaaaaaaaaa" | "bbbbbbbbbbbbb" | "cccccccccccccccccccccccccccccccccccccccccccc" if true => {}
"bind" | "writev" | "readv" | "sendmsg" | "recvmsg" if android && (aarch64 || x86_64) => true,
}
}
// #2376
// Preserve block around expressions with condition.
fn issue_2376() {
let mut x = None;
match x {
Some(0) => {
for i in 1..11 {
x = Some(i);
}
}
Some(ref mut y) => {
while *y < 10 {
*y += 1;
}
}
None => {
while let None = x {
x = Some(10);
}
}
}
}
// #2621
// Strip leading `|` in match arm patterns
fn issue_2621() {
let x = Foo::A;
match x {
Foo::A => println!("No vert single condition"),
Foo::B | Foo::C => println!("Center vert two conditions"),
| Foo::D => println!("Preceding vert single condition"),
| Foo::E
| Foo::F => println!("Preceding vert over two lines"),
Foo::G |
Foo::H => println!("Trailing vert over two lines"),
// Comment on its own line
| Foo::I => println!("With comment"), // Comment after line
}
}
fn issue_2377() {
match tok {
Tok::Not
| Tok::BNot
| Tok::Plus
| Tok::Minus
| Tok::PlusPlus
| Tok::MinusMinus
| Tok::Void
| Tok::Delete if prec <= 16 => {
// code here...
}
Tok::TypeOf if prec <= 16 => {}
}
}
// #3040
fn issue_3040() {
{
match foo {
DevtoolScriptControlMsg::WantsLiveNotifications(id, to_send) => {
match documents.find_window(id) {
Some(window) => devtools::handle_wants_live_notifications(window.upcast(), to_send),
None => return warn!("Message sent to closed pipeline {}.", id),
}
}
}
}
}
// #3030
fn issue_3030() {
match input.trim().parse::<f64>() {
Ok(val)
if!(
// A valid number is the same as what rust considers to be valid,
// except for +1., NaN, and Infinity.
val.is_infinite() || val
.is_nan() || input.ends_with(".") || input.starts_with("+")
)
=> {
}
}
}
fn issue_3005() {
match *token {
Token::Dimension {
value, ref unit,..
} if num_context.is_ok(context.parsing_mode, value) =>
{
return NoCalcLength::parse_dimension(context, value, unit)
.map(LengthOrPercentage::Length)
.map | {
Some(match type_ {
sfEvtClosed => Closed,
sfEvtResized => {
let e = unsafe { *event.size.as_ref() };
Resized {
width: e.width,
height: e.height,
}
}
sfEvtLostFocus => LostFocus,
sfEvtGainedFocus => GainedFocus,
sfEvtTextEntered => {
TextEntered {
unicode: unsafe {
::std::char::from_u32((*event.text.as_ref()).unicode)
.expect("Invalid unicode encountered on TextEntered event")
},
} | identifier_body |
match.rs | nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn |
Paternnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn => {
blah
}
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn => meh,
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn if looooooooooooooooooong_guard => meh,
Patternnnnnnnnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnnnnnnnn if looooooooooooooooooooooooooooooooooooooooong_guard =>
meh,
// Test that earlier patterns can take the guard space
(aaaa, bbbbb, ccccccc, aaaaa, bbbbbbbb, cccccc, aaaa, bbbbbbbb, cccccc, dddddd) |
Patternnnnnnnnnnnnnnnnnnnnnnnnn if loooooooooooooooooooooooooooooooooooooooooong_guard => {}
_ => {}
ast::PathParameters::AngleBracketedParameters(ref data) if data.lifetimes.len() > 0 ||
data.types.len() > 0 ||
data.bindings.len() > 0 => {}
}
let whatever = match something {
/// DOC COMMENT!
Some(_) => 42,
// Comment on an attribute.
#[an_attribute]
// Comment after an attribute.
None => 0,
#[rustfmt::skip]
Blurb => { }
};
}
// Test that a match on an overflow line is laid out properly.
fn main() {
let sub_span =
match xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx {
Some(sub_span) => Some(sub_span),
None => sub_span,
};
}
// Test that one-line bodies align.
fn main() {
match r {
Variableeeeeeeeeeeeeeeeee => ( "variable",
vec!("id", "name", "qualname",
"value", "type", "scopeid"),
true,
true),
Enummmmmmmmmmmmmmmmmmmmm => ("enum",
vec!("id","qualname","scopeid","value"),
true,
true),
Variantttttttttttttttttttttttt => ("variant",
vec!("id",
"name",
"qualname",
"type",
"value",
"scopeid"),
true,
true),
};
match x{
y=>{/*Block with comment. Preserve me.*/ }
z=>{stmt();} }
}
fn matches() {
match 1 {
-1 => 10,
1 => 1, // foo
2 => 2,
// bar
3 => 3,
_ => 0 // baz
}
}
fn match_skip() {
let _ = match Some(1) {
#[rustfmt::skip]
Some( n ) => n,
None => 1,
};
}
fn issue339() {
match a {
b => {}
c => { }
d => {
}
e => {
}
// collapsing here is safe
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff => {
}
// collapsing here exceeds line length
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffg => {
}
h => { // comment above block
}
i => {
} // comment below block
j => {
// comment inside block
}
j2 => {
// comments inside...
} //... and after
// TODO uncomment when vertical whitespace is handled better
// k => {
//
// // comment with WS above
// }
// l => {
// // comment with ws below
//
// }
m => {
} n => { } o =>
{
}
p => { // Don't collapse me
} q => { } r =>
{
}
s => 0, // s comment
// t comment
t => 1,
u => 2,
v => {
} /* funky block
* comment */
// final comment
}
}
fn issue355() {
match mac {
a => println!("a", b),
b => vec!(1, 2),
c => vec!(3; 4),
d => {
println!("a", b)
}
e => {
vec!(1, 2)
}
f => {
vec!(3; 4)
}
h => println!("a", b), // h comment
i => vec!(1, 2), // i comment
j => vec!(3; 4), // j comment
// k comment
k => println!("a", b),
// l comment
l => vec!(1, 2),
// m comment
m => vec!(3; 4),
// Rewrite splits macro
nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn => println!("a", b),
// Rewrite splits macro
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo => vec!(1, 2),
// Macro support fails to recognise this macro as splittable
// We push the whole expr to a new line, TODO split this macro as well
pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp => vec!(3; 4),
// q, r and s: Rewrite splits match arm
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq => println!("a", b),
rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr => vec!(1, 2),
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss => vec!(3; 4),
// Funky bracketing styles
t => println!{"a", b},
u => vec!{1, 2},
v => vec!{3; 4},
w => println!["a", b],
x => vec![1, 2],
y =>vec![3; 4],
// Brackets with comments
tc => println!{"a", b}, // comment
uc => vec!{1, 2}, // comment
vc =>vec!{3; 4}, // comment
wc =>println!["a", b], // comment
xc => vec![1,2], // comment
yc => vec![3; 4], // comment
yd =>
looooooooooooooooooooooooooooooooooooooooooooooooooooooooong_func(aaaaaaaaaa,
bbbbbbbbbb,
cccccccccc,
dddddddddd),
}
}
fn issue280() {
{
match x {
CompressionMode::DiscardNewline | CompressionMode::CompressWhitespaceNewline => ch ==
'\n',
ast::ItemConst(ref typ, ref expr) => self.process_static_or_const_item(item,
&typ,
&expr),
}
}
}
fn issue383() {
match resolution.last_private {LastImport{..} => false, _ => true};
}
fn issue507() {
match 1 {
1 => unsafe { std::intrinsics::abort() },
_ => (),
}
}
fn issue508() {
match s.type_id() {
Some(NodeTypeId::Element(ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLCanvasElement))) => true,
Some(NodeTypeId::Element(ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLObjectElement))) => s.has_object_data(),
Some(NodeTypeId::Element(_)) => false,
}
}
fn issue496() {{{{
match def {
def::DefConst(def_id) | def::DefAssociatedConst(def_id) =>
match const_eval::lookup_const_by_id(cx.tcx, def_id, Some(self.pat.id)) {
Some(const_expr) => { x }}}}}}}
fn issue494() {
{
match stmt.node {
hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) =>
result.push(
StmtRef::Mirror(
Box::new(Stmt { span: stmt.span,
kind: StmtKind::Expr {
scope: cx.tcx.region_maps.node_extent(id),
expr: expr.to_ref() } }))),
}
}
}
fn issue386() {
match foo {
BiEq | BiLt | BiLe | BiNe | BiGt | BiGe =>
true,
BiAnd | BiOr | BiAdd | BiSub | BiMul | BiDiv | BiRem |
BiBitXor | BiBitAnd | BiBitOr | BiShl | BiShr =>
false,
}
}
fn guards() {
match foo {
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa if foooooooooooooo && barrrrrrrrrrrr => {}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa if foooooooooooooo && barrrrrrrrrrrr => {}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
if fooooooooooooooooooooo &&
(bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb || cccccccccccccccccccccccccccccccccccccccc) => {}
}
}
fn issue1371() {
Some(match type_ {
sfEvtClosed => Closed,
sfEvtResized => {
let e = unsafe { *event.size.as_ref() };
Resized {
width: e.width,
height: e.height,
}
}
sfEvtLostFocus => LostFocus,
sfEvtGainedFocus => GainedFocus,
sfEvtTextEntered => {
TextEntered {
unicode: unsafe {
::std::char::from_u32((*event.text.as_ref()).unicode)
.expect("Invalid unicode encountered on TextEntered event")
},
}
}
sfEvtKeyPressed => {
let e = unsafe { event.key.as_ref() };
KeyPressed {
code: unsafe { ::std::mem::transmute(e.code) },
alt: e.alt.to_bool(),
ctrl: e.control.to_bool(),
shift: e.shift.to_bool(),
system: e.system.to_bool(),
}
}
sfEvtKeyReleased => {
let e = unsafe { event.key.as_ref() };
KeyReleased {
code: unsafe { ::std::mem::transmute(e.code) },
alt: e.alt.to_bool(),
ctrl: e.control.to_bool(),
shift: e.shift.to_bool(),
system: e.system.to_bool(),
}
}
})
}
fn issue1395() {
let bar = Some(true);
let foo = Some(true);
let mut x = false;
bar.and_then(|_| {
match foo {
None => None,
Some(b) => {
x = true;
Some(b)
}
}
});
}
fn issue1456() {
Ok(Recording {
artists: match reader.evaluate(".//mb:recording/mb:artist-credit/mb:name-credit")? {
Nodeset(nodeset) => {
let res: Result<Vec<ArtistRef>, ReadError> = nodeset
.iter()
.map(|node| {
XPathNodeReader::new(node, &context).and_then(|r| ArtistRef::from_xml(&r))
})
.collect();
res?
}
_ => Vec::new(),
},
})
}
fn issue1460() {
let _ = match foo {
REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT => "internal_spec_insert_internal_spec_insert_internal_spec_insert",
_ => "reorder_something",
};
}
fn | () {
foobar(f, "{}", match *self {
TaskState::Started => "started",
TaskState::Success => "success",
TaskState::Failed => "failed",
});
}
// #1838, #1839
fn match_with_near_max_width() {
let (this_line_uses_99_characters_and_is_formatted_properly, x012345) = match some_expression {
_ => unimplemented!(),
};
let (should_be_formatted_like_the_line_above_using_100_characters, x0) = match some_expression {
_ => unimplemented!(),
};
let (should_put_the_brace_on_the_next_line_using_101_characters, x0000) = match some_expression
{
_ => unimplemented!(),
};
match m {
Variant::Tag | Variant::Tag2 | Variant::Tag3 | Variant::Tag4 | Variant::Tag5 | Variant::Tag6 =>
{}
}
}
fn match_with_trailing_spaces() {
match x {
Some(..) => 0,
None => 1,
}
}
fn issue_2099() {
let a = match x {
};
let b = match x {
};
match x {}
}
// #2021
impl<'tcx> Const<'tcx> {
pub fn from_constval<'a>() -> Const<'tcx> {
let val = match *cv {
ConstVal::Variant(_) | ConstVal::Aggregate(..) | ConstVal::Unevaluated(..) => bug!("MIR must not use `{:?}` (aggregates are expanded to MIR rvalues)", cv),
};
}
}
// #2151
fn issue_2151() {
match either {
x => {
}y => ()
}
}
// #2152
fn issue_2152() {
match m {
"aaaaaaaaaaaaa" | "bbbbbbbbbbbbb" | "cccccccccccccccccccccccccccccccccccccccccccc" if true => {}
"bind" | "writev" | "readv" | "sendmsg" | "recvmsg" if android && (aarch64 || x86_64) => true,
}
}
// #2376
// Preserve block around expressions with condition.
fn issue_2376() {
let mut x = None;
match x {
Some(0) => {
for i in 1..11 {
x = Some(i);
}
}
Some(ref mut y) => {
while *y < 10 {
*y += 1;
}
}
None => {
while let None = x {
x = Some(10);
}
}
}
}
// #2621
// Strip leading `|` in match arm patterns
fn issue_2621() {
let x = Foo::A;
match x {
Foo::A => println!("No vert single condition"),
Foo::B | Foo::C => println!("Center vert two conditions"),
| Foo::D => println!("Preceding vert single condition"),
| Foo::E
| Foo::F => println!("Preceding vert over two lines"),
Foo::G |
Foo::H => println!("Trailing vert over two lines"),
// Comment on its own line
| Foo::I => println!("With comment"), // Comment after line
}
}
fn issue_2377() {
match tok {
Tok::Not
| Tok::BNot
| Tok::Plus
| Tok::Minus
| Tok::PlusPlus
| Tok::MinusMinus
| Tok::Void
| Tok::Delete if prec <= 16 => {
// code here...
}
Tok::TypeOf if prec <= 16 => {}
}
}
// #3040
fn issue_3040() {
{
match foo {
DevtoolScriptControlMsg::WantsLiveNotifications(id, to_send) => {
match documents.find_window(id) {
Some(window) => devtools::handle_wants_live_notifications(window.upcast(), to_send),
None => return warn!("Message sent to closed pipeline {}.", id),
}
}
}
}
}
// #3030
fn issue_3030() {
match input.trim().parse::<f64>() {
Ok(val)
if!(
// A valid number is the same as what rust considers to be valid,
// except for +1., NaN, and Infinity.
val.is_infinite() || val
.is_nan() || input.ends_with(".") || input.starts_with("+")
)
=> {
}
}
}
fn issue_3005() {
match *token {
Token::Dimension {
value, ref unit,..
} if num_context.is_ok(context.parsing_mode, value) =>
{
return NoCalcLength::parse_dimension(context, value, unit)
.map(LengthOrPercentage::Length)
.map | issue525 | identifier_name |
match.rs | ternnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn |
Paternnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn => {
blah
}
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn => meh,
Patternnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnn if looooooooooooooooooong_guard => meh,
Patternnnnnnnnnnnnnnnnnnnnnnnnn |
Patternnnnnnnnnnnnnnnnnnnnnnnnn if looooooooooooooooooooooooooooooooooooooooong_guard =>
meh,
// Test that earlier patterns can take the guard space
(aaaa, bbbbb, ccccccc, aaaaa, bbbbbbbb, cccccc, aaaa, bbbbbbbb, cccccc, dddddd) |
Patternnnnnnnnnnnnnnnnnnnnnnnnn if loooooooooooooooooooooooooooooooooooooooooong_guard => {}
_ => {}
ast::PathParameters::AngleBracketedParameters(ref data) if data.lifetimes.len() > 0 ||
data.types.len() > 0 ||
data.bindings.len() > 0 => {}
}
let whatever = match something {
/// DOC COMMENT!
Some(_) => 42,
// Comment on an attribute.
#[an_attribute]
// Comment after an attribute.
None => 0,
#[rustfmt::skip]
Blurb => { }
};
}
// Test that a match on an overflow line is laid out properly.
fn main() {
let sub_span =
match xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx {
Some(sub_span) => Some(sub_span),
None => sub_span,
};
}
// Test that one-line bodies align.
fn main() {
match r {
Variableeeeeeeeeeeeeeeeee => ( "variable",
vec!("id", "name", "qualname",
"value", "type", "scopeid"),
true,
true),
Enummmmmmmmmmmmmmmmmmmmm => ("enum",
vec!("id","qualname","scopeid","value"),
true,
true),
Variantttttttttttttttttttttttt => ("variant",
vec!("id",
"name",
"qualname",
"type",
"value",
"scopeid"),
true,
true),
};
match x{
y=>{/*Block with comment. Preserve me.*/ }
z=>{stmt();} }
}
fn matches() {
match 1 {
-1 => 10,
1 => 1, // foo
2 => 2,
// bar
3 => 3,
_ => 0 // baz
}
}
fn match_skip() {
let _ = match Some(1) {
#[rustfmt::skip]
Some( n ) => n,
None => 1,
};
}
fn issue339() {
match a {
b => {}
c => { }
d => {
}
e => {
}
// collapsing here is safe
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff => {
}
// collapsing here exceeds line length
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffg => {
}
h => { // comment above block
}
i => {
} // comment below block
j => {
// comment inside block
}
j2 => {
// comments inside...
} //... and after
// TODO uncomment when vertical whitespace is handled better
// k => {
//
// // comment with WS above | m => {
} n => { } o =>
{
}
p => { // Don't collapse me
} q => { } r =>
{
}
s => 0, // s comment
// t comment
t => 1,
u => 2,
v => {
} /* funky block
* comment */
// final comment
}
}
fn issue355() {
match mac {
a => println!("a", b),
b => vec!(1, 2),
c => vec!(3; 4),
d => {
println!("a", b)
}
e => {
vec!(1, 2)
}
f => {
vec!(3; 4)
}
h => println!("a", b), // h comment
i => vec!(1, 2), // i comment
j => vec!(3; 4), // j comment
// k comment
k => println!("a", b),
// l comment
l => vec!(1, 2),
// m comment
m => vec!(3; 4),
// Rewrite splits macro
nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn => println!("a", b),
// Rewrite splits macro
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo => vec!(1, 2),
// Macro support fails to recognise this macro as splittable
// We push the whole expr to a new line, TODO split this macro as well
pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp => vec!(3; 4),
// q, r and s: Rewrite splits match arm
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq => println!("a", b),
rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr => vec!(1, 2),
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss => vec!(3; 4),
// Funky bracketing styles
t => println!{"a", b},
u => vec!{1, 2},
v => vec!{3; 4},
w => println!["a", b],
x => vec![1, 2],
y =>vec![3; 4],
// Brackets with comments
tc => println!{"a", b}, // comment
uc => vec!{1, 2}, // comment
vc =>vec!{3; 4}, // comment
wc =>println!["a", b], // comment
xc => vec![1,2], // comment
yc => vec![3; 4], // comment
yd =>
looooooooooooooooooooooooooooooooooooooooooooooooooooooooong_func(aaaaaaaaaa,
bbbbbbbbbb,
cccccccccc,
dddddddddd),
}
}
fn issue280() {
{
match x {
CompressionMode::DiscardNewline | CompressionMode::CompressWhitespaceNewline => ch ==
'\n',
ast::ItemConst(ref typ, ref expr) => self.process_static_or_const_item(item,
&typ,
&expr),
}
}
}
fn issue383() {
match resolution.last_private {LastImport{..} => false, _ => true};
}
fn issue507() {
match 1 {
1 => unsafe { std::intrinsics::abort() },
_ => (),
}
}
fn issue508() {
match s.type_id() {
Some(NodeTypeId::Element(ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLCanvasElement))) => true,
Some(NodeTypeId::Element(ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLObjectElement))) => s.has_object_data(),
Some(NodeTypeId::Element(_)) => false,
}
}
fn issue496() {{{{
match def {
def::DefConst(def_id) | def::DefAssociatedConst(def_id) =>
match const_eval::lookup_const_by_id(cx.tcx, def_id, Some(self.pat.id)) {
Some(const_expr) => { x }}}}}}}
fn issue494() {
{
match stmt.node {
hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) =>
result.push(
StmtRef::Mirror(
Box::new(Stmt { span: stmt.span,
kind: StmtKind::Expr {
scope: cx.tcx.region_maps.node_extent(id),
expr: expr.to_ref() } }))),
}
}
}
fn issue386() {
match foo {
BiEq | BiLt | BiLe | BiNe | BiGt | BiGe =>
true,
BiAnd | BiOr | BiAdd | BiSub | BiMul | BiDiv | BiRem |
BiBitXor | BiBitAnd | BiBitOr | BiShl | BiShr =>
false,
}
}
fn guards() {
match foo {
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa if foooooooooooooo && barrrrrrrrrrrr => {}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa if foooooooooooooo && barrrrrrrrrrrr => {}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
if fooooooooooooooooooooo &&
(bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb || cccccccccccccccccccccccccccccccccccccccc) => {}
}
}
fn issue1371() {
Some(match type_ {
sfEvtClosed => Closed,
sfEvtResized => {
let e = unsafe { *event.size.as_ref() };
Resized {
width: e.width,
height: e.height,
}
}
sfEvtLostFocus => LostFocus,
sfEvtGainedFocus => GainedFocus,
sfEvtTextEntered => {
TextEntered {
unicode: unsafe {
::std::char::from_u32((*event.text.as_ref()).unicode)
.expect("Invalid unicode encountered on TextEntered event")
},
}
}
sfEvtKeyPressed => {
let e = unsafe { event.key.as_ref() };
KeyPressed {
code: unsafe { ::std::mem::transmute(e.code) },
alt: e.alt.to_bool(),
ctrl: e.control.to_bool(),
shift: e.shift.to_bool(),
system: e.system.to_bool(),
}
}
sfEvtKeyReleased => {
let e = unsafe { event.key.as_ref() };
KeyReleased {
code: unsafe { ::std::mem::transmute(e.code) },
alt: e.alt.to_bool(),
ctrl: e.control.to_bool(),
shift: e.shift.to_bool(),
system: e.system.to_bool(),
}
}
})
}
fn issue1395() {
let bar = Some(true);
let foo = Some(true);
let mut x = false;
bar.and_then(|_| {
match foo {
None => None,
Some(b) => {
x = true;
Some(b)
}
}
});
}
fn issue1456() {
Ok(Recording {
artists: match reader.evaluate(".//mb:recording/mb:artist-credit/mb:name-credit")? {
Nodeset(nodeset) => {
let res: Result<Vec<ArtistRef>, ReadError> = nodeset
.iter()
.map(|node| {
XPathNodeReader::new(node, &context).and_then(|r| ArtistRef::from_xml(&r))
})
.collect();
res?
}
_ => Vec::new(),
},
})
}
fn issue1460() {
let _ = match foo {
REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT => "internal_spec_insert_internal_spec_insert_internal_spec_insert",
_ => "reorder_something",
};
}
fn issue525() {
foobar(f, "{}", match *self {
TaskState::Started => "started",
TaskState::Success => "success",
TaskState::Failed => "failed",
});
}
// #1838, #1839
fn match_with_near_max_width() {
let (this_line_uses_99_characters_and_is_formatted_properly, x012345) = match some_expression {
_ => unimplemented!(),
};
let (should_be_formatted_like_the_line_above_using_100_characters, x0) = match some_expression {
_ => unimplemented!(),
};
let (should_put_the_brace_on_the_next_line_using_101_characters, x0000) = match some_expression
{
_ => unimplemented!(),
};
match m {
Variant::Tag | Variant::Tag2 | Variant::Tag3 | Variant::Tag4 | Variant::Tag5 | Variant::Tag6 =>
{}
}
}
fn match_with_trailing_spaces() {
match x {
Some(..) => 0,
None => 1,
}
}
fn issue_2099() {
let a = match x {
};
let b = match x {
};
match x {}
}
// #2021
impl<'tcx> Const<'tcx> {
pub fn from_constval<'a>() -> Const<'tcx> {
let val = match *cv {
ConstVal::Variant(_) | ConstVal::Aggregate(..) | ConstVal::Unevaluated(..) => bug!("MIR must not use `{:?}` (aggregates are expanded to MIR rvalues)", cv),
};
}
}
// #2151
fn issue_2151() {
match either {
x => {
}y => ()
}
}
// #2152
fn issue_2152() {
match m {
"aaaaaaaaaaaaa" | "bbbbbbbbbbbbb" | "cccccccccccccccccccccccccccccccccccccccccccc" if true => {}
"bind" | "writev" | "readv" | "sendmsg" | "recvmsg" if android && (aarch64 || x86_64) => true,
}
}
// #2376
// Preserve block around expressions with condition.
fn issue_2376() {
let mut x = None;
match x {
Some(0) => {
for i in 1..11 {
x = Some(i);
}
}
Some(ref mut y) => {
while *y < 10 {
*y += 1;
}
}
None => {
while let None = x {
x = Some(10);
}
}
}
}
// #2621
// Strip leading `|` in match arm patterns
fn issue_2621() {
let x = Foo::A;
match x {
Foo::A => println!("No vert single condition"),
Foo::B | Foo::C => println!("Center vert two conditions"),
| Foo::D => println!("Preceding vert single condition"),
| Foo::E
| Foo::F => println!("Preceding vert over two lines"),
Foo::G |
Foo::H => println!("Trailing vert over two lines"),
// Comment on its own line
| Foo::I => println!("With comment"), // Comment after line
}
}
fn issue_2377() {
match tok {
Tok::Not
| Tok::BNot
| Tok::Plus
| Tok::Minus
| Tok::PlusPlus
| Tok::MinusMinus
| Tok::Void
| Tok::Delete if prec <= 16 => {
// code here...
}
Tok::TypeOf if prec <= 16 => {}
}
}
// #3040
fn issue_3040() {
{
match foo {
DevtoolScriptControlMsg::WantsLiveNotifications(id, to_send) => {
match documents.find_window(id) {
Some(window) => devtools::handle_wants_live_notifications(window.upcast(), to_send),
None => return warn!("Message sent to closed pipeline {}.", id),
}
}
}
}
}
// #3030
fn issue_3030() {
match input.trim().parse::<f64>() {
Ok(val)
if!(
// A valid number is the same as what rust considers to be valid,
// except for +1., NaN, and Infinity.
val.is_infinite() || val
.is_nan() || input.ends_with(".") || input.starts_with("+")
)
=> {
}
}
}
fn issue_3005() {
match *token {
Token::Dimension {
value, ref unit,..
} if num_context.is_ok(context.parsing_mode, value) =>
{
return NoCalcLength::parse_dimension(context, value, unit)
.map(LengthOrPercentage::Length)
.map_ | // }
// l => {
// // comment with ws below
//
// } | random_line_split |
char_indexing.rs | use std::ops::Range;
pub(crate) trait CharIndexable<'b> {
fn char_index(&'b self, range: Range<usize>) -> &'b str;
}
pub struct CharIndexableStr<'a> {
s: &'a str,
indices: Vec<usize>,
}
impl CharIndexableStr<'_> {
pub(crate) fn char_count(&self) -> usize {
self.indices.len()
}
}
impl<'a> From<&'a str> for CharIndexableStr<'a> {
fn | (s: &'a str) -> Self {
CharIndexableStr {
indices: s.char_indices().map(|(i, _c)| i).collect(),
s,
}
}
}
impl<'a, 'b: 'a> CharIndexable<'b> for CharIndexableStr<'a> {
fn char_index(&'b self, range: Range<usize>) -> &'b str {
if range.end >= self.indices.len() {
&self.s[self.indices[range.start]..]
} else {
&self.s[self.indices[range.start]..self.indices[range.end]]
}
}
}
| from | identifier_name |
char_indexing.rs | use std::ops::Range;
pub(crate) trait CharIndexable<'b> {
fn char_index(&'b self, range: Range<usize>) -> &'b str;
}
pub struct CharIndexableStr<'a> {
s: &'a str,
indices: Vec<usize>,
}
impl CharIndexableStr<'_> {
pub(crate) fn char_count(&self) -> usize {
self.indices.len()
}
}
impl<'a> From<&'a str> for CharIndexableStr<'a> {
fn from(s: &'a str) -> Self {
CharIndexableStr {
indices: s.char_indices().map(|(i, _c)| i).collect(),
s,
}
}
}
impl<'a, 'b: 'a> CharIndexable<'b> for CharIndexableStr<'a> {
fn char_index(&'b self, range: Range<usize>) -> &'b str {
if range.end >= self.indices.len() {
&self.s[self.indices[range.start]..]
} else |
}
}
| {
&self.s[self.indices[range.start]..self.indices[range.end]]
} | conditional_block |
char_indexing.rs | use std::ops::Range;
pub(crate) trait CharIndexable<'b> {
fn char_index(&'b self, range: Range<usize>) -> &'b str;
}
pub struct CharIndexableStr<'a> {
s: &'a str,
indices: Vec<usize>,
}
impl CharIndexableStr<'_> {
pub(crate) fn char_count(&self) -> usize {
self.indices.len()
}
}
impl<'a> From<&'a str> for CharIndexableStr<'a> {
fn from(s: &'a str) -> Self {
CharIndexableStr {
indices: s.char_indices().map(|(i, _c)| i).collect(),
s,
}
}
}
impl<'a, 'b: 'a> CharIndexable<'b> for CharIndexableStr<'a> {
fn char_index(&'b self, range: Range<usize>) -> &'b str {
if range.end >= self.indices.len() { | &self.s[self.indices[range.start]..self.indices[range.end]]
}
}
} | &self.s[self.indices[range.start]..]
} else { | random_line_split |
estr-slice.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() |
info!(a);
assert!(a < b);
assert!(a <= b);
assert!(a!= b);
assert!(b >= a);
assert!(b > a);
info!(b);
assert!(a < c);
assert!(a <= c);
assert!(a!= c);
assert!(c >= a);
assert!(c > a);
info!(c);
assert!(c < cc);
assert!(c <= cc);
assert!(c!= cc);
assert!(cc >= c);
assert!(cc > c);
info!(cc);
}
| {
let x = &"hello";
let v = &"hello";
let y : &str = &"there";
info!(x);
info!(y);
assert_eq!(x[0], 'h' as u8);
assert_eq!(x[4], 'o' as u8);
let z : &str = &"thing";
assert_eq!(v, x);
assert!(x != z);
let a = &"aaaa";
let b = &"bbbb";
let c = &"cccc";
let cc = &"ccccc"; | identifier_body |
estr-slice.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
let x = &"hello";
let v = &"hello";
let y : &str = &"there";
info!(x);
info!(y);
assert_eq!(x[0], 'h' as u8);
assert_eq!(x[4], 'o' as u8);
let z : &str = &"thing";
assert_eq!(v, x);
assert!(x!= z);
let a = &"aaaa";
let b = &"bbbb";
let c = &"cccc";
let cc = &"ccccc";
info!(a);
assert!(a < b);
assert!(a <= b);
assert!(a!= b);
assert!(b >= a);
assert!(b > a);
info!(b);
assert!(a < c); | assert!(c > a);
info!(c);
assert!(c < cc);
assert!(c <= cc);
assert!(c!= cc);
assert!(cc >= c);
assert!(cc > c);
info!(cc);
} | assert!(a <= c);
assert!(a != c);
assert!(c >= a); | random_line_split |
estr-slice.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn | () {
let x = &"hello";
let v = &"hello";
let y : &str = &"there";
info!(x);
info!(y);
assert_eq!(x[0], 'h' as u8);
assert_eq!(x[4], 'o' as u8);
let z : &str = &"thing";
assert_eq!(v, x);
assert!(x!= z);
let a = &"aaaa";
let b = &"bbbb";
let c = &"cccc";
let cc = &"ccccc";
info!(a);
assert!(a < b);
assert!(a <= b);
assert!(a!= b);
assert!(b >= a);
assert!(b > a);
info!(b);
assert!(a < c);
assert!(a <= c);
assert!(a!= c);
assert!(c >= a);
assert!(c > a);
info!(c);
assert!(c < cc);
assert!(c <= cc);
assert!(c!= cc);
assert!(cc >= c);
assert!(cc > c);
info!(cc);
}
| main | identifier_name |
infer_regusage.rs | //! Infers how each function uses every register
//! For every function, patch all of its call sites to ignore registers that the
//! callee doesn't read and to preserve register values that the callee
//! preserves. Then, record which registers it reads and which registers it
//! preserves.
//!
//! After this, all functions should have mutable [`regusage`][RadecoFunction::regusage]s.
//!
//! This analysis is super conservative; for example, if a function preserves a
//! register by pushing it onto the stack and popping it back right before
//! returning, it is considered to be read and not preserved because we can't
//! guarantee that that stack location is never subsequently read or modified.
//! See #147 for further discussion
use analysis::analyzer::{
all, Action, Analyzer, AnalyzerInfo, AnalyzerKind, AnalyzerResult, Change, FuncAnalyzer,
ModuleAnalyzer,
};
use analysis::dce::DCE;
use analysis::inst_combine::Combiner;
use frontend::radeco_containers::{RadecoFunction, RadecoModule};
use middle::ir;
use middle::regfile::*;
use middle::ssa::cfg_traits::*;
use middle::ssa::ssa_traits::*;
use middle::ssa::ssastorage::SSAStorage;
use middle::ssa::utils;
use petgraph::visit::{DfsPostOrder, Walker};
use std::any::Any;
use std::collections::{BTreeMap, HashSet};
const NAME: &str = "inferer";
const REQUIRES: &[AnalyzerKind] = &[];
pub const INFO: AnalyzerInfo = AnalyzerInfo {
name: NAME,
kind: AnalyzerKind::Inferer,
requires: REQUIRES,
uses_policy: false,
};
#[derive(Debug)]
pub struct Inferer {
/// Register file of the current architecture.
reginfo: SubRegisterFile,
/// Addresses of the functions we've already analyzed
analyzed: HashSet<u64>,
}
impl Analyzer for Inferer {
fn info(&self) -> &'static AnalyzerInfo {
&INFO
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl ModuleAnalyzer for Inferer {
/// Calls `patch_fn`, `dce::collect`, and `analyze_fn` on every function,
/// callees first
fn analyze<T: FnMut(Box<Change>) -> Action>(
&mut self,
rmod: &mut RadecoModule,
_policy: Option<T>,
) -> Option<Box<AnalyzerResult>> {
// for imports, *ASSUME* that the callconv that r2 says is correct
let mut new_analyzed = Vec::new();
{
let imp_ru_iter = rmod.imports.iter().filter_map(|(&imp_addr, imp_info)| {
let imp_rfn = imp_info.rfn.borrow();
let regusage = self.reginfo.r2callconv_to_register_usage(
imp_rfn.callconv.as_ref()?, // ignore imports without callconvs
&*imp_rfn.callconv_name,
)?;
Some((imp_addr, regusage))
});
for (imp_addr, imp_ru) in imp_ru_iter {
rmod.functions.get_mut(&imp_addr).unwrap().regusage = imp_ru;
new_analyzed.push(imp_addr);
}
}
for func in new_analyzed {
self.analyzed.insert(func);
}
let mut dfs_wi = DfsPostOrder::empty(&rmod.callgraph).iter(&rmod.callgraph);
// pick a function...
for fn_ni in rmod.callgraph.node_indices() {
//... and start a dfs on it
dfs_wi.inner_mut().move_to(fn_ni);
while let Some(fn_to_anal) = dfs_wi.next() {
let fn_addr = rmod.callgraph[fn_to_anal];
// ignore functions already in `call_convs` (probably because its an import)
if!self.analyzed.contains(&fn_addr) {
self.patch_fn(fn_addr, &mut rmod.functions);
let rfn = &mut rmod.functions.get_mut(&fn_addr).unwrap();
let mut dce = DCE::new();
dce.analyze(rfn, Some(all));
let mut combiner = Combiner::new();
combiner.analyze(rfn, Some(all));
let ru = self.analyze_fn(rfn, &self.reginfo).unwrap_or_else(|| {
radeco_err!("Failed to analyze fn: {:?} (@ {:#X})", rfn.name, fn_addr);
// if analysis failed, default to "reads and clobbers everything"
self.reginfo.new_register_usage()
});
rfn.regusage = ru;
self.analyzed.insert(fn_addr);
}
}
}
None
}
}
impl Inferer {
pub fn new(reginfo: SubRegisterFile) -> Inferer {
Inferer {
reginfo: reginfo,
analyzed: HashSet::new(),
}
}
/// Using the callconv info we've gathered so far, patch-up call sites to
/// to remove arguments that the callee doesn't read and make values in
/// callee-saved registers be preserved across the call.
fn patch_fn(&self, fn_addr: u64, fn_map: &mut BTreeMap<u64, RadecoFunction>) -> () {
radeco_trace!("patching calls in fn: {}", fn_map[&fn_addr].name);
for node in fn_map[&fn_addr].ssa().inorder_walk() {
if let Ok(NodeType::Op(ir::MOpcode::OpCall)) =
fn_map[&fn_addr].ssa().node_data(node).map(|nd| nd.nt)
{
self.patch_call_node(node, fn_addr, fn_map)
.unwrap_or_else(|| {
radeco_warn!(
"failed to remove unused args for call at {:#X}",
fn_map[&fn_addr].ssa().address(node).unwrap()
);
});
}
}
}
fn | (
&self,
call_node: <SSAStorage as SSA>::ValueRef,
fn_addr: u64,
fn_map: &mut BTreeMap<u64, RadecoFunction>,
) -> Option<()> {
// bail on indirect or weird call
let (call_tgt_addr, call_reg_map) = direct_call_info(fn_map[&fn_addr].ssa(), call_node)?;
// remove unread args
for (regid, &op_node) in &call_reg_map {
if fn_map[&call_tgt_addr].regusage.is_ignored(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.op_unuse(call_node, op_node);
}
}
// bridge preserved registers
for (regid, (use_node, _)) in utils::call_rets(call_node, fn_map[&fn_addr].ssa()) {
if fn_map[&call_tgt_addr].regusage.is_preserved(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.replace_value(use_node, call_reg_map[regid]);
}
}
Some(())
}
fn analyze_fn(&self, rfn: &RadecoFunction, reginfo: &SubRegisterFile) -> Option<RegisterUsage> {
radeco_trace!("analyzing fn: {}", rfn.name);
let ssa = rfn.ssa();
let entry_regstate_node = ssa.registers_in(ssa.entry_node()?)?;
let exit_regstate_node = ssa.registers_in(ssa.exit_node()?)?;
// some registers may not be present in the entry node;
// this means that the function neither reads nor preserves that register
let entry_regstate = utils::register_state_info(entry_regstate_node, ssa);
let exit_regstate = utils::register_state_info(exit_regstate_node, ssa);
let mut ret = reginfo.new_register_usage();
ret.set_all_ignored();
for regid in ssa.regfile.iter_register_ids() {
// ignore registers not in entry regstate
if let Some(&(reg_val_entry, _)) = entry_regstate.get(regid) {
// bail if a register isn't present in exit regstate
let &(reg_val_exit, _) = exit_regstate.get(regid)?;
if reg_val_exit == reg_val_entry {
ret.set_preserved(regid);
}
// find all uses, ignoring entry/exit register state
let mut uses_iter = ssa
.uses_of(reg_val_entry)
.into_iter()
.filter(|&n| n!= entry_regstate_node && n!= exit_regstate_node);
if uses_iter.next().is_some() {
ret.set_read(regid);
}
}
}
Some(ret)
}
}
fn direct_call_info(
ssa: &SSAStorage,
call_node: <SSAStorage as SSA>::ValueRef,
) -> Option<(u64, RegisterMap<<SSAStorage as SSA>::ValueRef>)> {
let callinfo = utils::call_info(call_node, ssa)?;
Some((ssa.constant(callinfo.target)?, callinfo.register_args))
}
| patch_call_node | identifier_name |
infer_regusage.rs | //! Infers how each function uses every register
//! For every function, patch all of its call sites to ignore registers that the
//! callee doesn't read and to preserve register values that the callee
//! preserves. Then, record which registers it reads and which registers it
//! preserves.
//!
//! After this, all functions should have mutable [`regusage`][RadecoFunction::regusage]s.
//!
//! This analysis is super conservative; for example, if a function preserves a
//! register by pushing it onto the stack and popping it back right before
//! returning, it is considered to be read and not preserved because we can't
//! guarantee that that stack location is never subsequently read or modified.
//! See #147 for further discussion
use analysis::analyzer::{
all, Action, Analyzer, AnalyzerInfo, AnalyzerKind, AnalyzerResult, Change, FuncAnalyzer,
ModuleAnalyzer,
};
use analysis::dce::DCE;
use analysis::inst_combine::Combiner;
use frontend::radeco_containers::{RadecoFunction, RadecoModule};
use middle::ir;
use middle::regfile::*;
use middle::ssa::cfg_traits::*;
use middle::ssa::ssa_traits::*;
use middle::ssa::ssastorage::SSAStorage;
use middle::ssa::utils;
use petgraph::visit::{DfsPostOrder, Walker};
use std::any::Any;
use std::collections::{BTreeMap, HashSet};
const NAME: &str = "inferer";
const REQUIRES: &[AnalyzerKind] = &[];
pub const INFO: AnalyzerInfo = AnalyzerInfo {
name: NAME,
kind: AnalyzerKind::Inferer,
requires: REQUIRES,
uses_policy: false,
};
#[derive(Debug)]
pub struct Inferer {
/// Register file of the current architecture.
reginfo: SubRegisterFile,
/// Addresses of the functions we've already analyzed
analyzed: HashSet<u64>,
}
impl Analyzer for Inferer {
fn info(&self) -> &'static AnalyzerInfo {
&INFO
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl ModuleAnalyzer for Inferer {
/// Calls `patch_fn`, `dce::collect`, and `analyze_fn` on every function,
/// callees first
fn analyze<T: FnMut(Box<Change>) -> Action>(
&mut self,
rmod: &mut RadecoModule,
_policy: Option<T>,
) -> Option<Box<AnalyzerResult>> {
// for imports, *ASSUME* that the callconv that r2 says is correct
let mut new_analyzed = Vec::new();
{
let imp_ru_iter = rmod.imports.iter().filter_map(|(&imp_addr, imp_info)| {
let imp_rfn = imp_info.rfn.borrow();
let regusage = self.reginfo.r2callconv_to_register_usage(
imp_rfn.callconv.as_ref()?, // ignore imports without callconvs
&*imp_rfn.callconv_name,
)?;
Some((imp_addr, regusage))
});
for (imp_addr, imp_ru) in imp_ru_iter {
rmod.functions.get_mut(&imp_addr).unwrap().regusage = imp_ru;
new_analyzed.push(imp_addr);
}
}
for func in new_analyzed {
self.analyzed.insert(func);
}
let mut dfs_wi = DfsPostOrder::empty(&rmod.callgraph).iter(&rmod.callgraph);
// pick a function...
for fn_ni in rmod.callgraph.node_indices() {
//... and start a dfs on it
dfs_wi.inner_mut().move_to(fn_ni);
while let Some(fn_to_anal) = dfs_wi.next() {
let fn_addr = rmod.callgraph[fn_to_anal];
// ignore functions already in `call_convs` (probably because its an import)
if!self.analyzed.contains(&fn_addr) |
}
}
None
}
}
impl Inferer {
pub fn new(reginfo: SubRegisterFile) -> Inferer {
Inferer {
reginfo: reginfo,
analyzed: HashSet::new(),
}
}
/// Using the callconv info we've gathered so far, patch-up call sites to
/// to remove arguments that the callee doesn't read and make values in
/// callee-saved registers be preserved across the call.
fn patch_fn(&self, fn_addr: u64, fn_map: &mut BTreeMap<u64, RadecoFunction>) -> () {
radeco_trace!("patching calls in fn: {}", fn_map[&fn_addr].name);
for node in fn_map[&fn_addr].ssa().inorder_walk() {
if let Ok(NodeType::Op(ir::MOpcode::OpCall)) =
fn_map[&fn_addr].ssa().node_data(node).map(|nd| nd.nt)
{
self.patch_call_node(node, fn_addr, fn_map)
.unwrap_or_else(|| {
radeco_warn!(
"failed to remove unused args for call at {:#X}",
fn_map[&fn_addr].ssa().address(node).unwrap()
);
});
}
}
}
fn patch_call_node(
&self,
call_node: <SSAStorage as SSA>::ValueRef,
fn_addr: u64,
fn_map: &mut BTreeMap<u64, RadecoFunction>,
) -> Option<()> {
// bail on indirect or weird call
let (call_tgt_addr, call_reg_map) = direct_call_info(fn_map[&fn_addr].ssa(), call_node)?;
// remove unread args
for (regid, &op_node) in &call_reg_map {
if fn_map[&call_tgt_addr].regusage.is_ignored(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.op_unuse(call_node, op_node);
}
}
// bridge preserved registers
for (regid, (use_node, _)) in utils::call_rets(call_node, fn_map[&fn_addr].ssa()) {
if fn_map[&call_tgt_addr].regusage.is_preserved(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.replace_value(use_node, call_reg_map[regid]);
}
}
Some(())
}
fn analyze_fn(&self, rfn: &RadecoFunction, reginfo: &SubRegisterFile) -> Option<RegisterUsage> {
radeco_trace!("analyzing fn: {}", rfn.name);
let ssa = rfn.ssa();
let entry_regstate_node = ssa.registers_in(ssa.entry_node()?)?;
let exit_regstate_node = ssa.registers_in(ssa.exit_node()?)?;
// some registers may not be present in the entry node;
// this means that the function neither reads nor preserves that register
let entry_regstate = utils::register_state_info(entry_regstate_node, ssa);
let exit_regstate = utils::register_state_info(exit_regstate_node, ssa);
let mut ret = reginfo.new_register_usage();
ret.set_all_ignored();
for regid in ssa.regfile.iter_register_ids() {
// ignore registers not in entry regstate
if let Some(&(reg_val_entry, _)) = entry_regstate.get(regid) {
// bail if a register isn't present in exit regstate
let &(reg_val_exit, _) = exit_regstate.get(regid)?;
if reg_val_exit == reg_val_entry {
ret.set_preserved(regid);
}
// find all uses, ignoring entry/exit register state
let mut uses_iter = ssa
.uses_of(reg_val_entry)
.into_iter()
.filter(|&n| n!= entry_regstate_node && n!= exit_regstate_node);
if uses_iter.next().is_some() {
ret.set_read(regid);
}
}
}
Some(ret)
}
}
fn direct_call_info(
ssa: &SSAStorage,
call_node: <SSAStorage as SSA>::ValueRef,
) -> Option<(u64, RegisterMap<<SSAStorage as SSA>::ValueRef>)> {
let callinfo = utils::call_info(call_node, ssa)?;
Some((ssa.constant(callinfo.target)?, callinfo.register_args))
}
| {
self.patch_fn(fn_addr, &mut rmod.functions);
let rfn = &mut rmod.functions.get_mut(&fn_addr).unwrap();
let mut dce = DCE::new();
dce.analyze(rfn, Some(all));
let mut combiner = Combiner::new();
combiner.analyze(rfn, Some(all));
let ru = self.analyze_fn(rfn, &self.reginfo).unwrap_or_else(|| {
radeco_err!("Failed to analyze fn: {:?} (@ {:#X})", rfn.name, fn_addr);
// if analysis failed, default to "reads and clobbers everything"
self.reginfo.new_register_usage()
});
rfn.regusage = ru;
self.analyzed.insert(fn_addr);
} | conditional_block |
infer_regusage.rs | //! Infers how each function uses every register
//! For every function, patch all of its call sites to ignore registers that the
//! callee doesn't read and to preserve register values that the callee
//! preserves. Then, record which registers it reads and which registers it
//! preserves.
//!
//! After this, all functions should have mutable [`regusage`][RadecoFunction::regusage]s.
//!
//! This analysis is super conservative; for example, if a function preserves a
//! register by pushing it onto the stack and popping it back right before
//! returning, it is considered to be read and not preserved because we can't
//! guarantee that that stack location is never subsequently read or modified.
//! See #147 for further discussion
use analysis::analyzer::{
all, Action, Analyzer, AnalyzerInfo, AnalyzerKind, AnalyzerResult, Change, FuncAnalyzer,
ModuleAnalyzer,
};
use analysis::dce::DCE;
use analysis::inst_combine::Combiner;
use frontend::radeco_containers::{RadecoFunction, RadecoModule};
use middle::ir;
use middle::regfile::*;
use middle::ssa::cfg_traits::*;
use middle::ssa::ssa_traits::*;
use middle::ssa::ssastorage::SSAStorage;
use middle::ssa::utils;
use petgraph::visit::{DfsPostOrder, Walker};
use std::any::Any;
use std::collections::{BTreeMap, HashSet};
const NAME: &str = "inferer";
const REQUIRES: &[AnalyzerKind] = &[];
pub const INFO: AnalyzerInfo = AnalyzerInfo {
name: NAME,
kind: AnalyzerKind::Inferer,
requires: REQUIRES,
uses_policy: false,
};
#[derive(Debug)]
pub struct Inferer {
/// Register file of the current architecture.
reginfo: SubRegisterFile,
/// Addresses of the functions we've already analyzed
analyzed: HashSet<u64>,
}
impl Analyzer for Inferer {
fn info(&self) -> &'static AnalyzerInfo {
&INFO
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl ModuleAnalyzer for Inferer {
/// Calls `patch_fn`, `dce::collect`, and `analyze_fn` on every function,
/// callees first
fn analyze<T: FnMut(Box<Change>) -> Action>(
&mut self,
rmod: &mut RadecoModule,
_policy: Option<T>,
) -> Option<Box<AnalyzerResult>> {
// for imports, *ASSUME* that the callconv that r2 says is correct
let mut new_analyzed = Vec::new();
{
let imp_ru_iter = rmod.imports.iter().filter_map(|(&imp_addr, imp_info)| {
let imp_rfn = imp_info.rfn.borrow();
let regusage = self.reginfo.r2callconv_to_register_usage(
imp_rfn.callconv.as_ref()?, // ignore imports without callconvs
&*imp_rfn.callconv_name,
)?;
Some((imp_addr, regusage))
});
for (imp_addr, imp_ru) in imp_ru_iter {
rmod.functions.get_mut(&imp_addr).unwrap().regusage = imp_ru;
new_analyzed.push(imp_addr);
}
}
for func in new_analyzed {
self.analyzed.insert(func);
}
let mut dfs_wi = DfsPostOrder::empty(&rmod.callgraph).iter(&rmod.callgraph);
// pick a function...
for fn_ni in rmod.callgraph.node_indices() {
//... and start a dfs on it
dfs_wi.inner_mut().move_to(fn_ni);
while let Some(fn_to_anal) = dfs_wi.next() {
let fn_addr = rmod.callgraph[fn_to_anal];
// ignore functions already in `call_convs` (probably because its an import)
if!self.analyzed.contains(&fn_addr) {
self.patch_fn(fn_addr, &mut rmod.functions);
let rfn = &mut rmod.functions.get_mut(&fn_addr).unwrap();
let mut dce = DCE::new();
dce.analyze(rfn, Some(all));
let mut combiner = Combiner::new();
combiner.analyze(rfn, Some(all));
let ru = self.analyze_fn(rfn, &self.reginfo).unwrap_or_else(|| {
radeco_err!("Failed to analyze fn: {:?} (@ {:#X})", rfn.name, fn_addr);
// if analysis failed, default to "reads and clobbers everything"
self.reginfo.new_register_usage()
});
rfn.regusage = ru;
self.analyzed.insert(fn_addr);
}
}
}
None
}
}
impl Inferer {
pub fn new(reginfo: SubRegisterFile) -> Inferer {
Inferer {
reginfo: reginfo,
analyzed: HashSet::new(),
}
}
/// Using the callconv info we've gathered so far, patch-up call sites to
/// to remove arguments that the callee doesn't read and make values in
/// callee-saved registers be preserved across the call.
fn patch_fn(&self, fn_addr: u64, fn_map: &mut BTreeMap<u64, RadecoFunction>) -> () {
radeco_trace!("patching calls in fn: {}", fn_map[&fn_addr].name);
for node in fn_map[&fn_addr].ssa().inorder_walk() {
if let Ok(NodeType::Op(ir::MOpcode::OpCall)) =
fn_map[&fn_addr].ssa().node_data(node).map(|nd| nd.nt)
{
self.patch_call_node(node, fn_addr, fn_map)
.unwrap_or_else(|| {
radeco_warn!(
"failed to remove unused args for call at {:#X}",
fn_map[&fn_addr].ssa().address(node).unwrap()
);
});
}
}
}
fn patch_call_node(
&self,
call_node: <SSAStorage as SSA>::ValueRef,
fn_addr: u64,
fn_map: &mut BTreeMap<u64, RadecoFunction>,
) -> Option<()> {
// bail on indirect or weird call
let (call_tgt_addr, call_reg_map) = direct_call_info(fn_map[&fn_addr].ssa(), call_node)?;
// remove unread args
for (regid, &op_node) in &call_reg_map {
if fn_map[&call_tgt_addr].regusage.is_ignored(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.op_unuse(call_node, op_node);
}
}
// bridge preserved registers
for (regid, (use_node, _)) in utils::call_rets(call_node, fn_map[&fn_addr].ssa()) {
if fn_map[&call_tgt_addr].regusage.is_preserved(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.replace_value(use_node, call_reg_map[regid]);
}
}
Some(())
}
fn analyze_fn(&self, rfn: &RadecoFunction, reginfo: &SubRegisterFile) -> Option<RegisterUsage> {
radeco_trace!("analyzing fn: {}", rfn.name);
let ssa = rfn.ssa();
let entry_regstate_node = ssa.registers_in(ssa.entry_node()?)?;
let exit_regstate_node = ssa.registers_in(ssa.exit_node()?)?;
// some registers may not be present in the entry node;
// this means that the function neither reads nor preserves that register | ret.set_all_ignored();
for regid in ssa.regfile.iter_register_ids() {
// ignore registers not in entry regstate
if let Some(&(reg_val_entry, _)) = entry_regstate.get(regid) {
// bail if a register isn't present in exit regstate
let &(reg_val_exit, _) = exit_regstate.get(regid)?;
if reg_val_exit == reg_val_entry {
ret.set_preserved(regid);
}
// find all uses, ignoring entry/exit register state
let mut uses_iter = ssa
.uses_of(reg_val_entry)
.into_iter()
.filter(|&n| n!= entry_regstate_node && n!= exit_regstate_node);
if uses_iter.next().is_some() {
ret.set_read(regid);
}
}
}
Some(ret)
}
}
fn direct_call_info(
ssa: &SSAStorage,
call_node: <SSAStorage as SSA>::ValueRef,
) -> Option<(u64, RegisterMap<<SSAStorage as SSA>::ValueRef>)> {
let callinfo = utils::call_info(call_node, ssa)?;
Some((ssa.constant(callinfo.target)?, callinfo.register_args))
} | let entry_regstate = utils::register_state_info(entry_regstate_node, ssa);
let exit_regstate = utils::register_state_info(exit_regstate_node, ssa);
let mut ret = reginfo.new_register_usage(); | random_line_split |
infer_regusage.rs | //! Infers how each function uses every register
//! For every function, patch all of its call sites to ignore registers that the
//! callee doesn't read and to preserve register values that the callee
//! preserves. Then, record which registers it reads and which registers it
//! preserves.
//!
//! After this, all functions should have mutable [`regusage`][RadecoFunction::regusage]s.
//!
//! This analysis is super conservative; for example, if a function preserves a
//! register by pushing it onto the stack and popping it back right before
//! returning, it is considered to be read and not preserved because we can't
//! guarantee that that stack location is never subsequently read or modified.
//! See #147 for further discussion
use analysis::analyzer::{
all, Action, Analyzer, AnalyzerInfo, AnalyzerKind, AnalyzerResult, Change, FuncAnalyzer,
ModuleAnalyzer,
};
use analysis::dce::DCE;
use analysis::inst_combine::Combiner;
use frontend::radeco_containers::{RadecoFunction, RadecoModule};
use middle::ir;
use middle::regfile::*;
use middle::ssa::cfg_traits::*;
use middle::ssa::ssa_traits::*;
use middle::ssa::ssastorage::SSAStorage;
use middle::ssa::utils;
use petgraph::visit::{DfsPostOrder, Walker};
use std::any::Any;
use std::collections::{BTreeMap, HashSet};
const NAME: &str = "inferer";
const REQUIRES: &[AnalyzerKind] = &[];
pub const INFO: AnalyzerInfo = AnalyzerInfo {
name: NAME,
kind: AnalyzerKind::Inferer,
requires: REQUIRES,
uses_policy: false,
};
#[derive(Debug)]
pub struct Inferer {
/// Register file of the current architecture.
reginfo: SubRegisterFile,
/// Addresses of the functions we've already analyzed
analyzed: HashSet<u64>,
}
impl Analyzer for Inferer {
fn info(&self) -> &'static AnalyzerInfo {
&INFO
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl ModuleAnalyzer for Inferer {
/// Calls `patch_fn`, `dce::collect`, and `analyze_fn` on every function,
/// callees first
fn analyze<T: FnMut(Box<Change>) -> Action>(
&mut self,
rmod: &mut RadecoModule,
_policy: Option<T>,
) -> Option<Box<AnalyzerResult>> {
// for imports, *ASSUME* that the callconv that r2 says is correct
let mut new_analyzed = Vec::new();
{
let imp_ru_iter = rmod.imports.iter().filter_map(|(&imp_addr, imp_info)| {
let imp_rfn = imp_info.rfn.borrow();
let regusage = self.reginfo.r2callconv_to_register_usage(
imp_rfn.callconv.as_ref()?, // ignore imports without callconvs
&*imp_rfn.callconv_name,
)?;
Some((imp_addr, regusage))
});
for (imp_addr, imp_ru) in imp_ru_iter {
rmod.functions.get_mut(&imp_addr).unwrap().regusage = imp_ru;
new_analyzed.push(imp_addr);
}
}
for func in new_analyzed {
self.analyzed.insert(func);
}
let mut dfs_wi = DfsPostOrder::empty(&rmod.callgraph).iter(&rmod.callgraph);
// pick a function...
for fn_ni in rmod.callgraph.node_indices() {
//... and start a dfs on it
dfs_wi.inner_mut().move_to(fn_ni);
while let Some(fn_to_anal) = dfs_wi.next() {
let fn_addr = rmod.callgraph[fn_to_anal];
// ignore functions already in `call_convs` (probably because its an import)
if!self.analyzed.contains(&fn_addr) {
self.patch_fn(fn_addr, &mut rmod.functions);
let rfn = &mut rmod.functions.get_mut(&fn_addr).unwrap();
let mut dce = DCE::new();
dce.analyze(rfn, Some(all));
let mut combiner = Combiner::new();
combiner.analyze(rfn, Some(all));
let ru = self.analyze_fn(rfn, &self.reginfo).unwrap_or_else(|| {
radeco_err!("Failed to analyze fn: {:?} (@ {:#X})", rfn.name, fn_addr);
// if analysis failed, default to "reads and clobbers everything"
self.reginfo.new_register_usage()
});
rfn.regusage = ru;
self.analyzed.insert(fn_addr);
}
}
}
None
}
}
impl Inferer {
pub fn new(reginfo: SubRegisterFile) -> Inferer {
Inferer {
reginfo: reginfo,
analyzed: HashSet::new(),
}
}
/// Using the callconv info we've gathered so far, patch-up call sites to
/// to remove arguments that the callee doesn't read and make values in
/// callee-saved registers be preserved across the call.
fn patch_fn(&self, fn_addr: u64, fn_map: &mut BTreeMap<u64, RadecoFunction>) -> () |
fn patch_call_node(
&self,
call_node: <SSAStorage as SSA>::ValueRef,
fn_addr: u64,
fn_map: &mut BTreeMap<u64, RadecoFunction>,
) -> Option<()> {
// bail on indirect or weird call
let (call_tgt_addr, call_reg_map) = direct_call_info(fn_map[&fn_addr].ssa(), call_node)?;
// remove unread args
for (regid, &op_node) in &call_reg_map {
if fn_map[&call_tgt_addr].regusage.is_ignored(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.op_unuse(call_node, op_node);
}
}
// bridge preserved registers
for (regid, (use_node, _)) in utils::call_rets(call_node, fn_map[&fn_addr].ssa()) {
if fn_map[&call_tgt_addr].regusage.is_preserved(regid) {
fn_map
.get_mut(&fn_addr)
.unwrap()
.ssa_mut()
.replace_value(use_node, call_reg_map[regid]);
}
}
Some(())
}
fn analyze_fn(&self, rfn: &RadecoFunction, reginfo: &SubRegisterFile) -> Option<RegisterUsage> {
radeco_trace!("analyzing fn: {}", rfn.name);
let ssa = rfn.ssa();
let entry_regstate_node = ssa.registers_in(ssa.entry_node()?)?;
let exit_regstate_node = ssa.registers_in(ssa.exit_node()?)?;
// some registers may not be present in the entry node;
// this means that the function neither reads nor preserves that register
let entry_regstate = utils::register_state_info(entry_regstate_node, ssa);
let exit_regstate = utils::register_state_info(exit_regstate_node, ssa);
let mut ret = reginfo.new_register_usage();
ret.set_all_ignored();
for regid in ssa.regfile.iter_register_ids() {
// ignore registers not in entry regstate
if let Some(&(reg_val_entry, _)) = entry_regstate.get(regid) {
// bail if a register isn't present in exit regstate
let &(reg_val_exit, _) = exit_regstate.get(regid)?;
if reg_val_exit == reg_val_entry {
ret.set_preserved(regid);
}
// find all uses, ignoring entry/exit register state
let mut uses_iter = ssa
.uses_of(reg_val_entry)
.into_iter()
.filter(|&n| n!= entry_regstate_node && n!= exit_regstate_node);
if uses_iter.next().is_some() {
ret.set_read(regid);
}
}
}
Some(ret)
}
}
fn direct_call_info(
ssa: &SSAStorage,
call_node: <SSAStorage as SSA>::ValueRef,
) -> Option<(u64, RegisterMap<<SSAStorage as SSA>::ValueRef>)> {
let callinfo = utils::call_info(call_node, ssa)?;
Some((ssa.constant(callinfo.target)?, callinfo.register_args))
}
| {
radeco_trace!("patching calls in fn: {}", fn_map[&fn_addr].name);
for node in fn_map[&fn_addr].ssa().inorder_walk() {
if let Ok(NodeType::Op(ir::MOpcode::OpCall)) =
fn_map[&fn_addr].ssa().node_data(node).map(|nd| nd.nt)
{
self.patch_call_node(node, fn_addr, fn_map)
.unwrap_or_else(|| {
radeco_warn!(
"failed to remove unused args for call at {:#X}",
fn_map[&fn_addr].ssa().address(node).unwrap()
);
});
}
}
} | identifier_body |
main.rs |
use std::thread;
use std::thread::JoinHandle;
mod factorial;
use factorial::util::t_log;
use factorial::find_factors;
fn get_chunk(index: u64, chunk_size: u64, max: u64) -> Option<(u64, u64)> {
let mut result = None;
let low = chunk_size * (index - 1);
let high = chunk_size * index;
if high <= max {
result = Some((low, high))
} else if low >= max {
// no-op
} else {
result = Some((low, max))
}
result
}
fn main() {
const MAX: u64 = 300;
const CHUNK: u64 = 50;
let mut done = false;
let mut index: u64 = 1;
let mut handles: Vec<JoinHandle<_>> = vec![];
while! done {
let chunk = get_chunk(index, CHUNK, MAX);
if let Some((low, high)) = chunk {
let handle = thread::spawn(move || {
t_log(&format!("TRACER {} {}", low, high));
find_factors(low, high);
});
handles.push(handle);
index += 1;
} else |
}
t_log("waiting in main...");
for handle in handles.into_iter() {
handle.join().unwrap();
}
t_log("Ready.");
}
#[allow(unused_imports)]
mod tests {
use super::*;
#[test]
fn test_get_chunk_low_boundary() {
let index: u64 = 1;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 0);
assert_eq!(high, 10);
}
#[test]
fn test_get_chunk_basic() {
let index: u64 = 2;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 10);
assert_eq!(high, 20);
}
#[test]
fn test_get_chunk_high_boundary() {
let index: u64 = 3;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 20);
assert_eq!(high, 25);
}
#[test]
fn test_get_chunk_out_of_range() {
let index: u64 = 4;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let result = get_chunk(index, chunk, max);
assert_eq!(result.is_none(), true);
}
}
| {
done = true;
} | conditional_block |
main.rs |
use std::thread;
use std::thread::JoinHandle;
mod factorial;
use factorial::util::t_log;
use factorial::find_factors;
fn get_chunk(index: u64, chunk_size: u64, max: u64) -> Option<(u64, u64)> {
let mut result = None;
let low = chunk_size * (index - 1);
let high = chunk_size * index;
if high <= max {
result = Some((low, high))
} else if low >= max {
// no-op
} else {
result = Some((low, max))
}
result
}
fn main() {
const MAX: u64 = 300;
const CHUNK: u64 = 50;
let mut done = false;
let mut index: u64 = 1;
let mut handles: Vec<JoinHandle<_>> = vec![];
while! done {
let chunk = get_chunk(index, CHUNK, MAX);
if let Some((low, high)) = chunk {
let handle = thread::spawn(move || {
t_log(&format!("TRACER {} {}", low, high));
find_factors(low, high);
});
handles.push(handle);
index += 1;
} else {
done = true;
}
}
t_log("waiting in main...");
for handle in handles.into_iter() {
handle.join().unwrap();
}
t_log("Ready.");
}
#[allow(unused_imports)]
mod tests {
use super::*;
#[test]
fn | () {
let index: u64 = 1;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 0);
assert_eq!(high, 10);
}
#[test]
fn test_get_chunk_basic() {
let index: u64 = 2;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 10);
assert_eq!(high, 20);
}
#[test]
fn test_get_chunk_high_boundary() {
let index: u64 = 3;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 20);
assert_eq!(high, 25);
}
#[test]
fn test_get_chunk_out_of_range() {
let index: u64 = 4;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let result = get_chunk(index, chunk, max);
assert_eq!(result.is_none(), true);
}
}
| test_get_chunk_low_boundary | identifier_name |
main.rs | use std::thread;
use std::thread::JoinHandle;
mod factorial;
use factorial::util::t_log;
use factorial::find_factors;
fn get_chunk(index: u64, chunk_size: u64, max: u64) -> Option<(u64, u64)> {
let mut result = None;
let low = chunk_size * (index - 1);
let high = chunk_size * index;
if high <= max {
result = Some((low, high))
} else if low >= max {
// no-op
} else {
result = Some((low, max))
}
result
}
fn main() {
const MAX: u64 = 300;
const CHUNK: u64 = 50;
let mut done = false;
let mut index: u64 = 1;
let mut handles: Vec<JoinHandle<_>> = vec![];
while! done {
let chunk = get_chunk(index, CHUNK, MAX);
if let Some((low, high)) = chunk {
let handle = thread::spawn(move || {
t_log(&format!("TRACER {} {}", low, high));
find_factors(low, high);
});
handles.push(handle);
index += 1;
} else {
done = true;
}
}
t_log("waiting in main...");
for handle in handles.into_iter() {
handle.join().unwrap();
}
t_log("Ready.");
}
#[allow(unused_imports)]
mod tests {
use super::*;
#[test]
fn test_get_chunk_low_boundary() {
let index: u64 = 1;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 0);
assert_eq!(high, 10);
}
#[test]
fn test_get_chunk_basic() {
let index: u64 = 2;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 10);
assert_eq!(high, 20);
}
#[test]
fn test_get_chunk_high_boundary() {
let index: u64 = 3;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 20);
assert_eq!(high, 25);
}
#[test]
fn test_get_chunk_out_of_range() {
let index: u64 = 4;
let chunk: u64 = 10;
let max: u64 = 25;
// test |
assert_eq!(result.is_none(), true);
}
} | let result = get_chunk(index, chunk, max); | random_line_split |
main.rs |
use std::thread;
use std::thread::JoinHandle;
mod factorial;
use factorial::util::t_log;
use factorial::find_factors;
fn get_chunk(index: u64, chunk_size: u64, max: u64) -> Option<(u64, u64)> {
let mut result = None;
let low = chunk_size * (index - 1);
let high = chunk_size * index;
if high <= max {
result = Some((low, high))
} else if low >= max {
// no-op
} else {
result = Some((low, max))
}
result
}
fn main() {
const MAX: u64 = 300;
const CHUNK: u64 = 50;
let mut done = false;
let mut index: u64 = 1;
let mut handles: Vec<JoinHandle<_>> = vec![];
while! done {
let chunk = get_chunk(index, CHUNK, MAX);
if let Some((low, high)) = chunk {
let handle = thread::spawn(move || {
t_log(&format!("TRACER {} {}", low, high));
find_factors(low, high);
});
handles.push(handle);
index += 1;
} else {
done = true;
}
}
t_log("waiting in main...");
for handle in handles.into_iter() {
handle.join().unwrap();
}
t_log("Ready.");
}
#[allow(unused_imports)]
mod tests {
use super::*;
#[test]
fn test_get_chunk_low_boundary() {
let index: u64 = 1;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 0);
assert_eq!(high, 10);
}
#[test]
fn test_get_chunk_basic() {
let index: u64 = 2;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 10);
assert_eq!(high, 20);
}
#[test]
fn test_get_chunk_high_boundary() {
let index: u64 = 3;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let (low, high) = get_chunk(index, chunk, max).unwrap();
assert_eq!(low, 20);
assert_eq!(high, 25);
}
#[test]
fn test_get_chunk_out_of_range() |
}
| {
let index: u64 = 4;
let chunk: u64 = 10;
let max: u64 = 25;
// test
let result = get_chunk(index, chunk, max);
assert_eq!(result.is_none(), true);
} | identifier_body |
mod.rs | pub use self::os::{FNM_NOMATCH};
pub use self::os::{FNM_PATHNAME};
pub use self::os::{FNM_PERIOD};
pub use self::os::{FNM_NOESCAPE};
use {NTStr, int_t, char_t};
#[cfg(target_os = "linux")]
#[path = "linux/mod.rs"]
mod os;
pub fn fnmatch<T: NTStr, U: NTStr>(pattern: &T, string: &U, flags: int_t) -> int_t {
extern {
fn fnmatch(pattern: *const char_t, name: *const char_t, flags: int_t) -> int_t;
}
unsafe { fnmatch(pattern.as_ptr(), string.as_ptr(), flags) }
}
#[cfg(test)]
mod tests {
use {ToNTStr};
#[test]
fn test() |
}
| {
let pat = "abc*123".to_nt_str();
let stn = "abcTE/ST123".to_nt_str();
let pat2 = "*123".to_nt_str();
let stn2 = ".123".to_nt_str();
assert_eq!(super::fnmatch(&pat, &stn, 0), 0);
assert_eq!(super::fnmatch(&pat, &stn, super::FNM_PATHNAME), super::FNM_NOMATCH);
assert_eq!(super::fnmatch(&pat2, &stn2, super::FNM_PATHNAME), 0);
assert_eq!(super::fnmatch(&pat, &stn, super::FNM_PERIOD), 0);
assert_eq!(super::fnmatch(&pat2, &stn2, super::FNM_PERIOD), super::FNM_NOMATCH);
} | identifier_body |
mod.rs | pub use self::os::{FNM_NOMATCH};
pub use self::os::{FNM_PATHNAME};
pub use self::os::{FNM_PERIOD};
pub use self::os::{FNM_NOESCAPE};
use {NTStr, int_t, char_t};
#[cfg(target_os = "linux")]
#[path = "linux/mod.rs"]
mod os;
pub fn fnmatch<T: NTStr, U: NTStr>(pattern: &T, string: &U, flags: int_t) -> int_t {
extern {
fn fnmatch(pattern: *const char_t, name: *const char_t, flags: int_t) -> int_t;
} | unsafe { fnmatch(pattern.as_ptr(), string.as_ptr(), flags) }
}
#[cfg(test)]
mod tests {
use {ToNTStr};
#[test]
fn test() {
let pat = "abc*123".to_nt_str();
let stn = "abcTE/ST123".to_nt_str();
let pat2 = "*123".to_nt_str();
let stn2 = ".123".to_nt_str();
assert_eq!(super::fnmatch(&pat, &stn, 0), 0);
assert_eq!(super::fnmatch(&pat, &stn, super::FNM_PATHNAME), super::FNM_NOMATCH);
assert_eq!(super::fnmatch(&pat2, &stn2, super::FNM_PATHNAME), 0);
assert_eq!(super::fnmatch(&pat, &stn, super::FNM_PERIOD), 0);
assert_eq!(super::fnmatch(&pat2, &stn2, super::FNM_PERIOD), super::FNM_NOMATCH);
}
} | random_line_split |
|
mod.rs | pub use self::os::{FNM_NOMATCH};
pub use self::os::{FNM_PATHNAME};
pub use self::os::{FNM_PERIOD};
pub use self::os::{FNM_NOESCAPE};
use {NTStr, int_t, char_t};
#[cfg(target_os = "linux")]
#[path = "linux/mod.rs"]
mod os;
pub fn | <T: NTStr, U: NTStr>(pattern: &T, string: &U, flags: int_t) -> int_t {
extern {
fn fnmatch(pattern: *const char_t, name: *const char_t, flags: int_t) -> int_t;
}
unsafe { fnmatch(pattern.as_ptr(), string.as_ptr(), flags) }
}
#[cfg(test)]
mod tests {
use {ToNTStr};
#[test]
fn test() {
let pat = "abc*123".to_nt_str();
let stn = "abcTE/ST123".to_nt_str();
let pat2 = "*123".to_nt_str();
let stn2 = ".123".to_nt_str();
assert_eq!(super::fnmatch(&pat, &stn, 0), 0);
assert_eq!(super::fnmatch(&pat, &stn, super::FNM_PATHNAME), super::FNM_NOMATCH);
assert_eq!(super::fnmatch(&pat2, &stn2, super::FNM_PATHNAME), 0);
assert_eq!(super::fnmatch(&pat, &stn, super::FNM_PERIOD), 0);
assert_eq!(super::fnmatch(&pat2, &stn2, super::FNM_PERIOD), super::FNM_NOMATCH);
}
}
| fnmatch | identifier_name |
calls.rs | use crate::types::{Error, Params, Value};
use crate::BoxFuture;
use std::fmt;
use std::future::Future;
use std::sync::Arc;
/// Metadata trait
pub trait Metadata: Clone + Send +'static {}
impl Metadata for () {}
impl<T: Metadata> Metadata for Option<T> {}
impl<T: Metadata> Metadata for Box<T> {}
impl<T: Sync + Send +'static> Metadata for Arc<T> {}
/// A future-conversion trait.
pub trait WrapFuture<T, E> {
/// Convert itself into a boxed future.
fn into_future(self) -> BoxFuture<Result<T, E>>;
}
impl<T: Send +'static, E: Send +'static> WrapFuture<T, E> for Result<T, E> {
fn into_future(self) -> BoxFuture<Result<T, E>> {
Box::pin(async { self })
}
}
impl<T, E> WrapFuture<T, E> for BoxFuture<Result<T, E>> {
fn | (self) -> BoxFuture<Result<T, E>> {
self
}
}
/// A synchronous or asynchronous method.
pub trait RpcMethodSync: Send + Sync +'static {
/// Call method
fn call(&self, params: Params) -> BoxFuture<crate::Result<Value>>;
}
/// Asynchronous Method
pub trait RpcMethodSimple: Send + Sync +'static {
/// Output future
type Out: Future<Output = Result<Value, Error>> + Send;
/// Call method
fn call(&self, params: Params) -> Self::Out;
}
/// Asynchronous Method with Metadata
pub trait RpcMethod<T: Metadata>: Send + Sync +'static {
/// Call method
fn call(&self, params: Params, meta: T) -> BoxFuture<crate::Result<Value>>;
}
/// Notification
pub trait RpcNotificationSimple: Send + Sync +'static {
/// Execute notification
fn execute(&self, params: Params);
}
/// Notification with Metadata
pub trait RpcNotification<T: Metadata>: Send + Sync +'static {
/// Execute notification
fn execute(&self, params: Params, meta: T);
}
/// Possible Remote Procedures with Metadata
#[derive(Clone)]
pub enum RemoteProcedure<T: Metadata> {
/// A method call
Method(Arc<dyn RpcMethod<T>>),
/// A notification
Notification(Arc<dyn RpcNotification<T>>),
/// An alias to other method,
Alias(String),
}
impl<T: Metadata> fmt::Debug for RemoteProcedure<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::RemoteProcedure::*;
match *self {
Method(..) => write!(fmt, "<method>"),
Notification(..) => write!(fmt, "<notification>"),
Alias(ref alias) => write!(fmt, "alias => {:?}", alias),
}
}
}
impl<F: Send + Sync +'static, X: Send +'static> RpcMethodSimple for F
where
F: Fn(Params) -> X,
X: Future<Output = Result<Value, Error>>,
{
type Out = X;
fn call(&self, params: Params) -> Self::Out {
self(params)
}
}
impl<F: Send + Sync +'static, X: Send +'static> RpcMethodSync for F
where
F: Fn(Params) -> X,
X: WrapFuture<Value, Error>,
{
fn call(&self, params: Params) -> BoxFuture<crate::Result<Value>> {
self(params).into_future()
}
}
impl<F: Send + Sync +'static> RpcNotificationSimple for F
where
F: Fn(Params),
{
fn execute(&self, params: Params) {
self(params)
}
}
impl<F: Send + Sync +'static, X: Send +'static, T> RpcMethod<T> for F
where
T: Metadata,
F: Fn(Params, T) -> X,
X: Future<Output = Result<Value, Error>>,
{
fn call(&self, params: Params, meta: T) -> BoxFuture<crate::Result<Value>> {
Box::pin(self(params, meta))
}
}
impl<F: Send + Sync +'static, T> RpcNotification<T> for F
where
T: Metadata,
F: Fn(Params, T),
{
fn execute(&self, params: Params, meta: T) {
self(params, meta)
}
}
| into_future | identifier_name |
calls.rs | use crate::types::{Error, Params, Value};
use crate::BoxFuture;
use std::fmt;
use std::future::Future;
use std::sync::Arc;
/// Metadata trait
pub trait Metadata: Clone + Send +'static {}
impl Metadata for () {} | /// A future-conversion trait.
pub trait WrapFuture<T, E> {
/// Convert itself into a boxed future.
fn into_future(self) -> BoxFuture<Result<T, E>>;
}
impl<T: Send +'static, E: Send +'static> WrapFuture<T, E> for Result<T, E> {
fn into_future(self) -> BoxFuture<Result<T, E>> {
Box::pin(async { self })
}
}
impl<T, E> WrapFuture<T, E> for BoxFuture<Result<T, E>> {
fn into_future(self) -> BoxFuture<Result<T, E>> {
self
}
}
/// A synchronous or asynchronous method.
pub trait RpcMethodSync: Send + Sync +'static {
/// Call method
fn call(&self, params: Params) -> BoxFuture<crate::Result<Value>>;
}
/// Asynchronous Method
pub trait RpcMethodSimple: Send + Sync +'static {
/// Output future
type Out: Future<Output = Result<Value, Error>> + Send;
/// Call method
fn call(&self, params: Params) -> Self::Out;
}
/// Asynchronous Method with Metadata
pub trait RpcMethod<T: Metadata>: Send + Sync +'static {
/// Call method
fn call(&self, params: Params, meta: T) -> BoxFuture<crate::Result<Value>>;
}
/// Notification
pub trait RpcNotificationSimple: Send + Sync +'static {
/// Execute notification
fn execute(&self, params: Params);
}
/// Notification with Metadata
pub trait RpcNotification<T: Metadata>: Send + Sync +'static {
/// Execute notification
fn execute(&self, params: Params, meta: T);
}
/// Possible Remote Procedures with Metadata
#[derive(Clone)]
pub enum RemoteProcedure<T: Metadata> {
/// A method call
Method(Arc<dyn RpcMethod<T>>),
/// A notification
Notification(Arc<dyn RpcNotification<T>>),
/// An alias to other method,
Alias(String),
}
impl<T: Metadata> fmt::Debug for RemoteProcedure<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::RemoteProcedure::*;
match *self {
Method(..) => write!(fmt, "<method>"),
Notification(..) => write!(fmt, "<notification>"),
Alias(ref alias) => write!(fmt, "alias => {:?}", alias),
}
}
}
impl<F: Send + Sync +'static, X: Send +'static> RpcMethodSimple for F
where
F: Fn(Params) -> X,
X: Future<Output = Result<Value, Error>>,
{
type Out = X;
fn call(&self, params: Params) -> Self::Out {
self(params)
}
}
impl<F: Send + Sync +'static, X: Send +'static> RpcMethodSync for F
where
F: Fn(Params) -> X,
X: WrapFuture<Value, Error>,
{
fn call(&self, params: Params) -> BoxFuture<crate::Result<Value>> {
self(params).into_future()
}
}
impl<F: Send + Sync +'static> RpcNotificationSimple for F
where
F: Fn(Params),
{
fn execute(&self, params: Params) {
self(params)
}
}
impl<F: Send + Sync +'static, X: Send +'static, T> RpcMethod<T> for F
where
T: Metadata,
F: Fn(Params, T) -> X,
X: Future<Output = Result<Value, Error>>,
{
fn call(&self, params: Params, meta: T) -> BoxFuture<crate::Result<Value>> {
Box::pin(self(params, meta))
}
}
impl<F: Send + Sync +'static, T> RpcNotification<T> for F
where
T: Metadata,
F: Fn(Params, T),
{
fn execute(&self, params: Params, meta: T) {
self(params, meta)
}
} | impl<T: Metadata> Metadata for Option<T> {}
impl<T: Metadata> Metadata for Box<T> {}
impl<T: Sync + Send + 'static> Metadata for Arc<T> {}
| random_line_split |
calls.rs | use crate::types::{Error, Params, Value};
use crate::BoxFuture;
use std::fmt;
use std::future::Future;
use std::sync::Arc;
/// Metadata trait
pub trait Metadata: Clone + Send +'static {}
impl Metadata for () {}
impl<T: Metadata> Metadata for Option<T> {}
impl<T: Metadata> Metadata for Box<T> {}
impl<T: Sync + Send +'static> Metadata for Arc<T> {}
/// A future-conversion trait.
pub trait WrapFuture<T, E> {
/// Convert itself into a boxed future.
fn into_future(self) -> BoxFuture<Result<T, E>>;
}
impl<T: Send +'static, E: Send +'static> WrapFuture<T, E> for Result<T, E> {
fn into_future(self) -> BoxFuture<Result<T, E>> {
Box::pin(async { self })
}
}
impl<T, E> WrapFuture<T, E> for BoxFuture<Result<T, E>> {
fn into_future(self) -> BoxFuture<Result<T, E>> {
self
}
}
/// A synchronous or asynchronous method.
pub trait RpcMethodSync: Send + Sync +'static {
/// Call method
fn call(&self, params: Params) -> BoxFuture<crate::Result<Value>>;
}
/// Asynchronous Method
pub trait RpcMethodSimple: Send + Sync +'static {
/// Output future
type Out: Future<Output = Result<Value, Error>> + Send;
/// Call method
fn call(&self, params: Params) -> Self::Out;
}
/// Asynchronous Method with Metadata
pub trait RpcMethod<T: Metadata>: Send + Sync +'static {
/// Call method
fn call(&self, params: Params, meta: T) -> BoxFuture<crate::Result<Value>>;
}
/// Notification
pub trait RpcNotificationSimple: Send + Sync +'static {
/// Execute notification
fn execute(&self, params: Params);
}
/// Notification with Metadata
pub trait RpcNotification<T: Metadata>: Send + Sync +'static {
/// Execute notification
fn execute(&self, params: Params, meta: T);
}
/// Possible Remote Procedures with Metadata
#[derive(Clone)]
pub enum RemoteProcedure<T: Metadata> {
/// A method call
Method(Arc<dyn RpcMethod<T>>),
/// A notification
Notification(Arc<dyn RpcNotification<T>>),
/// An alias to other method,
Alias(String),
}
impl<T: Metadata> fmt::Debug for RemoteProcedure<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::RemoteProcedure::*;
match *self {
Method(..) => write!(fmt, "<method>"),
Notification(..) => write!(fmt, "<notification>"),
Alias(ref alias) => write!(fmt, "alias => {:?}", alias),
}
}
}
impl<F: Send + Sync +'static, X: Send +'static> RpcMethodSimple for F
where
F: Fn(Params) -> X,
X: Future<Output = Result<Value, Error>>,
{
type Out = X;
fn call(&self, params: Params) -> Self::Out {
self(params)
}
}
impl<F: Send + Sync +'static, X: Send +'static> RpcMethodSync for F
where
F: Fn(Params) -> X,
X: WrapFuture<Value, Error>,
{
fn call(&self, params: Params) -> BoxFuture<crate::Result<Value>> {
self(params).into_future()
}
}
impl<F: Send + Sync +'static> RpcNotificationSimple for F
where
F: Fn(Params),
{
fn execute(&self, params: Params) {
self(params)
}
}
impl<F: Send + Sync +'static, X: Send +'static, T> RpcMethod<T> for F
where
T: Metadata,
F: Fn(Params, T) -> X,
X: Future<Output = Result<Value, Error>>,
{
fn call(&self, params: Params, meta: T) -> BoxFuture<crate::Result<Value>> |
}
impl<F: Send + Sync +'static, T> RpcNotification<T> for F
where
T: Metadata,
F: Fn(Params, T),
{
fn execute(&self, params: Params, meta: T) {
self(params, meta)
}
}
| {
Box::pin(self(params, meta))
} | identifier_body |
mutex.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
#[cfg(feature = "thread")]
use sys::sync as ffi;
use sys_common::mutex;
#[cfg(feature = "thread")]
pub struct Mutex { inner: UnsafeCell<ffi::pthread_mutex_t> }
#[inline]
#[cfg(feature = "thread")]
pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t {
m.inner.get()
}
#[cfg(feature = "thread")]
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[cfg(feature = "thread")]
impl Mutex {
#[inline]
pub unsafe fn new() -> Mutex {
// Might be moved and address is changing it is better to avoid
// initialization of potentially opaque OS data before it landed
MUTEX_INIT
}
#[inline]
pub unsafe fn lock(&self) {
let r = ffi::pthread_mutex_lock(self.inner.get());
debug_assert_eq!(r, 0);
}
#[inline]
pub unsafe fn unlock(&self) {
let r = ffi::pthread_mutex_unlock(self.inner.get());
debug_assert_eq!(r, 0);
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
ffi::pthread_mutex_trylock(self.inner.get()) == 0
}
#[inline]
#[cfg(not(target_os = "dragonfly"))] | }
#[inline]
#[cfg(target_os = "dragonfly")]
pub unsafe fn destroy(&self) {
use libc;
let r = ffi::pthread_mutex_destroy(self.inner.get());
// On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
// mutex that was just initialized with ffi::PTHREAD_MUTEX_INITIALIZER.
// Once it is used (locked/unlocked) or pthread_mutex_init() is called,
// this behaviour no longer occurs.
debug_assert!(r == 0 || r == libc::EINVAL);
}
}
#[cfg(not(feature = "thread"))]
pub struct Mutex;
#[cfg(not(feature = "thread"))]
impl Mutex {
#[inline]
pub unsafe fn new() -> Mutex {
Mutex
}
#[inline]
pub unsafe fn lock(&self) {
}
#[inline]
pub unsafe fn unlock(&self) {
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
true
}
#[inline]
pub unsafe fn destroy(&self) {
}
}
#[cfg(not(feature = "thread"))]
pub const MUTEX_INIT: Mutex = Mutex; | pub unsafe fn destroy(&self) {
let r = ffi::pthread_mutex_destroy(self.inner.get());
debug_assert_eq!(r, 0); | random_line_split |
mutex.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
#[cfg(feature = "thread")]
use sys::sync as ffi;
use sys_common::mutex;
#[cfg(feature = "thread")]
pub struct Mutex { inner: UnsafeCell<ffi::pthread_mutex_t> }
#[inline]
#[cfg(feature = "thread")]
pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t {
m.inner.get()
}
#[cfg(feature = "thread")]
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[cfg(feature = "thread")]
impl Mutex {
#[inline]
pub unsafe fn new() -> Mutex {
// Might be moved and address is changing it is better to avoid
// initialization of potentially opaque OS data before it landed
MUTEX_INIT
}
#[inline]
pub unsafe fn lock(&self) {
let r = ffi::pthread_mutex_lock(self.inner.get());
debug_assert_eq!(r, 0);
}
#[inline]
pub unsafe fn unlock(&self) {
let r = ffi::pthread_mutex_unlock(self.inner.get());
debug_assert_eq!(r, 0);
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
ffi::pthread_mutex_trylock(self.inner.get()) == 0
}
#[inline]
#[cfg(not(target_os = "dragonfly"))]
pub unsafe fn destroy(&self) {
let r = ffi::pthread_mutex_destroy(self.inner.get());
debug_assert_eq!(r, 0);
}
#[inline]
#[cfg(target_os = "dragonfly")]
pub unsafe fn destroy(&self) {
use libc;
let r = ffi::pthread_mutex_destroy(self.inner.get());
// On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
// mutex that was just initialized with ffi::PTHREAD_MUTEX_INITIALIZER.
// Once it is used (locked/unlocked) or pthread_mutex_init() is called,
// this behaviour no longer occurs.
debug_assert!(r == 0 || r == libc::EINVAL);
}
}
#[cfg(not(feature = "thread"))]
pub struct Mutex;
#[cfg(not(feature = "thread"))]
impl Mutex {
#[inline]
pub unsafe fn new() -> Mutex {
Mutex
}
#[inline]
pub unsafe fn lock(&self) {
}
#[inline]
pub unsafe fn unlock(&self) {
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
true
}
#[inline]
pub unsafe fn | (&self) {
}
}
#[cfg(not(feature = "thread"))]
pub const MUTEX_INIT: Mutex = Mutex;
| destroy | identifier_name |
mod.rs | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
mod engine;
mod engine_factory;
mod mvcc;
mod storage;
mod txn;
use std::fmt; | use self::engine::bench_engine;
use self::engine_factory::{BTreeEngineFactory, EngineFactory, RocksEngineFactory};
use self::mvcc::bench_mvcc;
use self::storage::bench_storage;
use self::txn::bench_txn;
use criterion::Criterion;
use tikv::storage::Engine;
const DEFAULT_ITERATIONS: usize = 10;
const DEFAULT_KEY_LENGTHS: [usize; 1] = [64];
const DEFAULT_VALUE_LENGTHS: [usize; 2] = [64, 65];
const DEFAULT_KV_GENERATOR_SEED: u64 = 0;
#[derive(Clone)]
pub struct BenchConfig<F> {
pub key_length: usize,
pub value_length: usize,
pub engine_factory: F,
}
impl<F: fmt::Debug> fmt::Debug for BenchConfig<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{:?}_KL{:?}_VL{:?}",
self.engine_factory, self.key_length, self.value_length
)
}
}
pub fn load_configs<E: Engine, F: EngineFactory<E>>(engine_factory: F) -> Vec<BenchConfig<F>> {
let key_lengths = DEFAULT_KEY_LENGTHS;
let value_lengths = DEFAULT_VALUE_LENGTHS;
let mut configs = vec![];
for &kl in &key_lengths {
for &vl in &value_lengths {
configs.push(BenchConfig {
key_length: kl,
value_length: vl,
engine_factory,
})
}
}
configs
}
fn main() {
let mut c = Criterion::default().configure_from_args();
let btree_engine_configs = load_configs(BTreeEngineFactory {});
let rocks_engine_configs = load_configs(RocksEngineFactory {});
bench_engine(&mut c, &btree_engine_configs);
bench_engine(&mut c, &rocks_engine_configs);
bench_mvcc(&mut c, &btree_engine_configs);
bench_mvcc(&mut c, &rocks_engine_configs);
bench_txn(&mut c, &btree_engine_configs);
bench_txn(&mut c, &rocks_engine_configs);
bench_storage(&mut c, &btree_engine_configs);
bench_storage(&mut c, &rocks_engine_configs);
c.final_summary();
} | random_line_split |
|
mod.rs | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
mod engine;
mod engine_factory;
mod mvcc;
mod storage;
mod txn;
use std::fmt;
use self::engine::bench_engine;
use self::engine_factory::{BTreeEngineFactory, EngineFactory, RocksEngineFactory};
use self::mvcc::bench_mvcc;
use self::storage::bench_storage;
use self::txn::bench_txn;
use criterion::Criterion;
use tikv::storage::Engine;
const DEFAULT_ITERATIONS: usize = 10;
const DEFAULT_KEY_LENGTHS: [usize; 1] = [64];
const DEFAULT_VALUE_LENGTHS: [usize; 2] = [64, 65];
const DEFAULT_KV_GENERATOR_SEED: u64 = 0;
#[derive(Clone)]
pub struct | <F> {
pub key_length: usize,
pub value_length: usize,
pub engine_factory: F,
}
impl<F: fmt::Debug> fmt::Debug for BenchConfig<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{:?}_KL{:?}_VL{:?}",
self.engine_factory, self.key_length, self.value_length
)
}
}
pub fn load_configs<E: Engine, F: EngineFactory<E>>(engine_factory: F) -> Vec<BenchConfig<F>> {
let key_lengths = DEFAULT_KEY_LENGTHS;
let value_lengths = DEFAULT_VALUE_LENGTHS;
let mut configs = vec![];
for &kl in &key_lengths {
for &vl in &value_lengths {
configs.push(BenchConfig {
key_length: kl,
value_length: vl,
engine_factory,
})
}
}
configs
}
fn main() {
let mut c = Criterion::default().configure_from_args();
let btree_engine_configs = load_configs(BTreeEngineFactory {});
let rocks_engine_configs = load_configs(RocksEngineFactory {});
bench_engine(&mut c, &btree_engine_configs);
bench_engine(&mut c, &rocks_engine_configs);
bench_mvcc(&mut c, &btree_engine_configs);
bench_mvcc(&mut c, &rocks_engine_configs);
bench_txn(&mut c, &btree_engine_configs);
bench_txn(&mut c, &rocks_engine_configs);
bench_storage(&mut c, &btree_engine_configs);
bench_storage(&mut c, &rocks_engine_configs);
c.final_summary();
}
| BenchConfig | identifier_name |
mod.rs | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
mod engine;
mod engine_factory;
mod mvcc;
mod storage;
mod txn;
use std::fmt;
use self::engine::bench_engine;
use self::engine_factory::{BTreeEngineFactory, EngineFactory, RocksEngineFactory};
use self::mvcc::bench_mvcc;
use self::storage::bench_storage;
use self::txn::bench_txn;
use criterion::Criterion;
use tikv::storage::Engine;
const DEFAULT_ITERATIONS: usize = 10;
const DEFAULT_KEY_LENGTHS: [usize; 1] = [64];
const DEFAULT_VALUE_LENGTHS: [usize; 2] = [64, 65];
const DEFAULT_KV_GENERATOR_SEED: u64 = 0;
#[derive(Clone)]
pub struct BenchConfig<F> {
pub key_length: usize,
pub value_length: usize,
pub engine_factory: F,
}
impl<F: fmt::Debug> fmt::Debug for BenchConfig<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{:?}_KL{:?}_VL{:?}",
self.engine_factory, self.key_length, self.value_length
)
}
}
pub fn load_configs<E: Engine, F: EngineFactory<E>>(engine_factory: F) -> Vec<BenchConfig<F>> |
fn main() {
let mut c = Criterion::default().configure_from_args();
let btree_engine_configs = load_configs(BTreeEngineFactory {});
let rocks_engine_configs = load_configs(RocksEngineFactory {});
bench_engine(&mut c, &btree_engine_configs);
bench_engine(&mut c, &rocks_engine_configs);
bench_mvcc(&mut c, &btree_engine_configs);
bench_mvcc(&mut c, &rocks_engine_configs);
bench_txn(&mut c, &btree_engine_configs);
bench_txn(&mut c, &rocks_engine_configs);
bench_storage(&mut c, &btree_engine_configs);
bench_storage(&mut c, &rocks_engine_configs);
c.final_summary();
}
| {
let key_lengths = DEFAULT_KEY_LENGTHS;
let value_lengths = DEFAULT_VALUE_LENGTHS;
let mut configs = vec![];
for &kl in &key_lengths {
for &vl in &value_lengths {
configs.push(BenchConfig {
key_length: kl,
value_length: vl,
engine_factory,
})
}
}
configs
} | identifier_body |
array.rs | //! Helper Module to treat Vec and fixed sized arrays as generic in some contexts
use crate::linear_algebra::Vector;
/// This trait is used to make up for the lack of generics over array lengths
pub trait Array {
/// Element type of the array
type Element;
/// Corresponding Vector type with same dimension
type Vector: Vector;
/// Number of elements within the array
fn length(&self) -> usize;
/// Access element by immutable reference
fn at_ref(&self, index: usize) -> &Self::Element;
/// Access element by mutable reference
fn at_mut(&mut self, index: usize) -> &mut Self::Element;
}
macro_rules! array_impl_for {
($v:expr) => {
impl<T> Array for [T; $v] {
type Element = T;
type Vector = [f64; $v]; | }
fn at_ref(&self, index: usize) -> &T {
&self[index]
}
fn at_mut(&mut self, index: usize) -> &mut T {
&mut self[index]
}
}
};
}
array_impl_for! { 1 }
array_impl_for! { 2 }
array_impl_for! { 3 }
array_impl_for! { 4 }
array_impl_for! { 5 }
array_impl_for! { 6 }
array_impl_for! { 7 }
array_impl_for! { 8 }
array_impl_for! { 9 }
array_impl_for! { 10 }
array_impl_for! { 11 }
array_impl_for! { 12 }
array_impl_for! { 13 }
array_impl_for! { 14 }
array_impl_for! { 15 }
array_impl_for! { 16 }
array_impl_for! { 17 }
array_impl_for! { 18 }
array_impl_for! { 19 }
array_impl_for! { 20 }
array_impl_for! { 21 }
array_impl_for! { 22 }
array_impl_for! { 23 }
array_impl_for! { 24 }
array_impl_for! { 25 }
array_impl_for! { 26 }
array_impl_for! { 27 }
array_impl_for! { 28 }
array_impl_for! { 29 }
array_impl_for! { 30 }
array_impl_for! { 31 }
array_impl_for! { 32 } |
fn length(&self) -> usize {
$v | random_line_split |
lib.rs | use wasm_bindgen::prelude::*;
#[rustfmt::skip]
#[wasm_bindgen]
pub fn | (
r: f64,
g: f64,
b: f64,
a: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> Vec<f64> {
let mut r = r / 255.0;
let mut g = g / 255.0;
let mut b = b / 255.0;
// Need lto = true in Cargo.toml to link pow
r = if r > 0.04045 { ((r + 0.055) / 1.055).powf(2.4) } else { r / 12.92 };
g = if g > 0.04045 { ((g + 0.055) / 1.055).powf(2.4) } else { g / 12.92 };
b = if b > 0.04045 { ((b + 0.055) / 1.055).powf(2.4) } else { b / 12.92 };
r *= 100.0;
g *= 100.0;
b *= 100.0;
// Observer= 2° (Only use CIE 1931!)
let mut x = r * 0.4124 + g * 0.3576 + b * 0.1805;
let mut y = r * 0.2126 + g * 0.7152 + b * 0.0722;
let mut z = r * 0.0193 + g * 0.1192 + b * 0.9505;
x /= ref_x;
y /= ref_y;
z /= ref_z;
x = if x > 0.008856 { x.powf(1.0 / 3.0) } else { x * 7.787 + 16.0 / 116.0 };
y = if y > 0.008856 { y.powf(1.0 / 3.0) } else { y * 7.787 + 16.0 / 116.0 };
z = if z > 0.008856 { z.powf(1.0 / 3.0) } else { z * 7.787 + 16.0 / 116.0 };
let out_l = 116.0 * y - 16.0;
let out_a = 500.0 * (x - y);
let out_b = 200.0 * (y - z);
vec![out_l, out_a, out_b, a]
}
#[wasm_bindgen]
pub fn rgba_laba_distance(
r1: f64,
g1: f64,
b1: f64,
a1: f64,
r2: f64,
g2: f64,
b2: f64,
a2: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> f64 {
let left = rgba2laba(r1, g1, b1, a1, ref_x, ref_y, ref_z);
let right = rgba2laba(r2, g2, b2, a2, ref_x, ref_y, ref_z);
let dist = ((right[0] - left[0]).powf(2.0) + (right[1] - left[1]).powf(2.0) + (right[2] - left[2]).powf(2.0)).sqrt();
dist
}
| rgba2laba | identifier_name |
lib.rs | use wasm_bindgen::prelude::*;
#[rustfmt::skip]
#[wasm_bindgen]
pub fn rgba2laba(
r: f64,
g: f64,
b: f64,
a: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> Vec<f64> | y /= ref_y;
z /= ref_z;
x = if x > 0.008856 { x.powf(1.0 / 3.0) } else { x * 7.787 + 16.0 / 116.0 };
y = if y > 0.008856 { y.powf(1.0 / 3.0) } else { y * 7.787 + 16.0 / 116.0 };
z = if z > 0.008856 { z.powf(1.0 / 3.0) } else { z * 7.787 + 16.0 / 116.0 };
let out_l = 116.0 * y - 16.0;
let out_a = 500.0 * (x - y);
let out_b = 200.0 * (y - z);
vec![out_l, out_a, out_b, a]
}
#[wasm_bindgen]
pub fn rgba_laba_distance(
r1: f64,
g1: f64,
b1: f64,
a1: f64,
r2: f64,
g2: f64,
b2: f64,
a2: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> f64 {
let left = rgba2laba(r1, g1, b1, a1, ref_x, ref_y, ref_z);
let right = rgba2laba(r2, g2, b2, a2, ref_x, ref_y, ref_z);
let dist = ((right[0] - left[0]).powf(2.0) + (right[1] - left[1]).powf(2.0) + (right[2] - left[2]).powf(2.0)).sqrt();
dist
}
| {
let mut r = r / 255.0;
let mut g = g / 255.0;
let mut b = b / 255.0;
// Need lto = true in Cargo.toml to link pow
r = if r > 0.04045 { ((r + 0.055) / 1.055).powf(2.4) } else { r / 12.92 };
g = if g > 0.04045 { ((g + 0.055) / 1.055).powf(2.4) } else { g / 12.92 };
b = if b > 0.04045 { ((b + 0.055) / 1.055).powf(2.4) } else { b / 12.92 };
r *= 100.0;
g *= 100.0;
b *= 100.0;
// Observer= 2° (Only use CIE 1931!)
let mut x = r * 0.4124 + g * 0.3576 + b * 0.1805;
let mut y = r * 0.2126 + g * 0.7152 + b * 0.0722;
let mut z = r * 0.0193 + g * 0.1192 + b * 0.9505;
x /= ref_x; | identifier_body |
lib.rs | use wasm_bindgen::prelude::*;
#[rustfmt::skip]
#[wasm_bindgen]
pub fn rgba2laba(
r: f64,
g: f64,
b: f64,
a: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> Vec<f64> {
let mut r = r / 255.0;
let mut g = g / 255.0;
let mut b = b / 255.0;
// Need lto = true in Cargo.toml to link pow
r = if r > 0.04045 { ((r + 0.055) / 1.055).powf(2.4) } else { r / 12.92 };
g = if g > 0.04045 { ((g + 0.055) / 1.055).powf(2.4) } else { g / 12.92 };
b = if b > 0.04045 { ((b + 0.055) / 1.055).powf(2.4) } else { b / 12.92 };
r *= 100.0;
g *= 100.0;
b *= 100.0;
// Observer= 2° (Only use CIE 1931!)
let mut x = r * 0.4124 + g * 0.3576 + b * 0.1805;
let mut y = r * 0.2126 + g * 0.7152 + b * 0.0722;
let mut z = r * 0.0193 + g * 0.1192 + b * 0.9505;
x /= ref_x;
y /= ref_y;
z /= ref_z;
x = if x > 0.008856 { x.powf(1.0 / 3.0) } else { x * 7.787 + 16.0 / 116.0 };
y = if y > 0.008856 { y.powf(1.0 / 3.0) } else { y * 7.787 + 16.0 / 116.0 };
z = if z > 0.008856 { z.powf(1.0 / 3.0) } else { z * 7.787 + 16.0 / 116.0 };
let out_l = 116.0 * y - 16.0;
let out_a = 500.0 * (x - y);
let out_b = 200.0 * (y - z);
vec![out_l, out_a, out_b, a]
}
#[wasm_bindgen]
pub fn rgba_laba_distance(
r1: f64,
g1: f64, | g2: f64,
b2: f64,
a2: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> f64 {
let left = rgba2laba(r1, g1, b1, a1, ref_x, ref_y, ref_z);
let right = rgba2laba(r2, g2, b2, a2, ref_x, ref_y, ref_z);
let dist = ((right[0] - left[0]).powf(2.0) + (right[1] - left[1]).powf(2.0) + (right[2] - left[2]).powf(2.0)).sqrt();
dist
} | b1: f64,
a1: f64,
r2: f64, | random_line_split |
lib.rs | use wasm_bindgen::prelude::*;
#[rustfmt::skip]
#[wasm_bindgen]
pub fn rgba2laba(
r: f64,
g: f64,
b: f64,
a: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> Vec<f64> {
let mut r = r / 255.0;
let mut g = g / 255.0;
let mut b = b / 255.0;
// Need lto = true in Cargo.toml to link pow
r = if r > 0.04045 { ((r + 0.055) / 1.055).powf(2.4) } else { r / 12.92 };
g = if g > 0.04045 { ((g + 0.055) / 1.055).powf(2.4) } else | ;
b = if b > 0.04045 { ((b + 0.055) / 1.055).powf(2.4) } else { b / 12.92 };
r *= 100.0;
g *= 100.0;
b *= 100.0;
// Observer= 2° (Only use CIE 1931!)
let mut x = r * 0.4124 + g * 0.3576 + b * 0.1805;
let mut y = r * 0.2126 + g * 0.7152 + b * 0.0722;
let mut z = r * 0.0193 + g * 0.1192 + b * 0.9505;
x /= ref_x;
y /= ref_y;
z /= ref_z;
x = if x > 0.008856 { x.powf(1.0 / 3.0) } else { x * 7.787 + 16.0 / 116.0 };
y = if y > 0.008856 { y.powf(1.0 / 3.0) } else { y * 7.787 + 16.0 / 116.0 };
z = if z > 0.008856 { z.powf(1.0 / 3.0) } else { z * 7.787 + 16.0 / 116.0 };
let out_l = 116.0 * y - 16.0;
let out_a = 500.0 * (x - y);
let out_b = 200.0 * (y - z);
vec![out_l, out_a, out_b, a]
}
#[wasm_bindgen]
pub fn rgba_laba_distance(
r1: f64,
g1: f64,
b1: f64,
a1: f64,
r2: f64,
g2: f64,
b2: f64,
a2: f64,
ref_x: f64,
ref_y: f64,
ref_z: f64,
) -> f64 {
let left = rgba2laba(r1, g1, b1, a1, ref_x, ref_y, ref_z);
let right = rgba2laba(r2, g2, b2, a2, ref_x, ref_y, ref_z);
let dist = ((right[0] - left[0]).powf(2.0) + (right[1] - left[1]).powf(2.0) + (right[2] - left[2]).powf(2.0)).sqrt();
dist
}
| { g / 12.92 } | conditional_block |
main.rs | extern crate clap;
extern crate pwhash;
extern crate termios;
extern crate users;
use clap::{App, Arg};
use std::error;
use std::fmt;
use std::fs::File;
use std::io::{self, BufRead, BufReader, Write};
use std::path::Path;
use termios::{tcsetattr, Termios};
#[derive(Debug)] |
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::Io(err)
}
}
impl From<pwhash::error::Error> for Error {
fn from(err: pwhash::error::Error) -> Error {
Error::PwHash(err)
}
}
impl From<String> for Error {
fn from(err: String) -> Error {
Error::Str(err)
}
}
impl From<&str> for Error {
fn from(err: &str) -> Error {
Error::Str(err.to_owned())
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::PwHash(ref err) => write!(f, "PwHash error: {}", err),
Error::Str(ref err) => f.write_str(err),
}
}
}
impl error::Error for Error {
fn cause(&self) -> Option<&dyn error::Error> {
match *self {
Error::Io(ref err) => Some(err),
Error::PwHash(ref err) => Some(err),
Error::Str(_) => None,
}
}
}
/// Prompt the password of a user
fn prompt_password() -> Result<String, Error> {
// Disable ECHO but echo the new line character
let initial_term = Termios::from_fd(0)?;
let mut term = initial_term;
term.c_lflag &=!termios::ECHO;
term.c_lflag |= termios::ECHONL;
tcsetattr(0, termios::TCSANOW, &term)?;
let mut password_line = String::new();
eprint!("Password: ");
let result = io::stderr()
.flush()
.and_then(|_| io::stdin().read_line(&mut password_line));
// Reset the initial terminal before returning a failure
tcsetattr(0, termios::TCSANOW, &initial_term)?;
result?;
Ok(password_line
.trim_end_matches(|c| c == '\r' || c == '\n')
.to_string())
}
/// Check a password using a `/etc/shadow` file
fn check_password_in_shadow<P: AsRef<Path>>(
shadow_path: P,
user: &str,
password_opt: Option<&str>,
) -> Result<(), Error> {
let mut is_found = false;
let mut prompted_password = None;
let file = File::open(shadow_path)?;
let file_buffer = BufReader::new(&file);
for line_result in file_buffer.lines() {
let line = line_result?;
let fields: Vec<&str> = line.split(':').collect();
if fields.len() >= 2 && fields[0] == user {
is_found = true;
let password_hash = fields[1];
if password_hash == "" || password_hash == "x" || password_hash.starts_with("!") {
println!("Ignoring hash {:?} for {}", password_hash, user);
continue;
}
println!("Found hash for {}: {:?}", user, password_hash);
// Prompt the user for a password if there was none provided
let password = match password_opt {
Some(p) => p,
None => {
if prompted_password.is_none() {
prompted_password = Some(prompt_password()?);
}
prompted_password.as_ref().unwrap()
}
};
// TODO: use a secure hash comparison function, which is constant-time
if pwhash::unix::crypt(password, password_hash)? == password_hash {
println!("The password is correct :)");
return Ok(());
}
}
}
if!is_found {
return Err(Error::Str("user not found in shadow file".to_owned()));
} else {
return Err(Error::Str("incorrect password".to_owned()));
}
}
/// Check a password using `unix_chkpwd` helper
///
/// The source code of the helper is
/// [on GitHub](https://github.com/linux-pam/linux-pam/blob/v1.3.1/modules/pam_unix/unix_chkpwd.c)
fn check_password_with_helper(user: &str, password_opt: Option<&str>) -> Result<(), Error> {
// Find unix_chkpwd
let mut unix_chkpwd_path_opt = None;
for path_dir in &["/bin", "/sbin", "/usr/bin", "/usr/sbin"] {
let path = path_dir.to_string() + "/unix_chkpwd";
if std::fs::metadata(&path).is_ok() {
unix_chkpwd_path_opt = Some(path);
break;
}
}
let unix_chkpwd_path = unix_chkpwd_path_opt.ok_or("unable to find unix_chkpwd helper")?;
println!("Using helper {}", unix_chkpwd_path);
let prompted_password;
let password = match password_opt {
Some(p) => p,
None => {
prompted_password = prompt_password()?;
prompted_password.as_ref()
}
};
let mut child = std::process::Command::new(unix_chkpwd_path)
.args(&[user, "nullok"])
.current_dir("/")
.stdin(std::process::Stdio::piped())
.spawn()?;
{
let stdin = child.stdin.as_mut().unwrap();
stdin.write_all(password.as_bytes())?;
stdin.write_all(&[0])?;
}
let exit_status = child.wait()?;
if!exit_status.success() {
if exit_status.code() == Some(7) {
return Err(Error::Str("incorrect password".to_owned()));
} else {
return Err(Error::Str(format!("unknown exit status ({})", exit_status)));
}
}
println!("The password is correct :)");
Ok(())
}
fn main_with_result() -> Result<(), Error> {
let matches = App::new("CheckLinuxPass")
.version("0.1.0")
.author("Nicolas Iooss")
.about("Check a password on a Linux system")
.arg(
Arg::with_name("user")
.takes_value(true)
.help("name of the user to check the password"),
)
.arg(
Arg::with_name("password")
.short("p")
.long("password")
.takes_value(true)
.help("password to test"),
)
.arg(
Arg::with_name("shadow_file")
.short("s")
.long("shadow")
.takes_value(true)
.help("use a shadow file to test the password"),
)
.get_matches();
let current_username;
let username: &str = match matches.value_of("user") {
Some(u) => u,
None => {
current_username =
users::get_current_username().ok_or("unable to get the current user name")?;
current_username
.to_str()
.ok_or("unable to convert the current user name to str")?
}
};
let password_opt = matches.value_of("password");
if let Some(shadow_path) = matches.value_of("shadow_file") {
// Parse /etc/shadow in search for the user
check_password_in_shadow(shadow_path, &username, password_opt)?;
} else {
check_password_with_helper(&username, password_opt)?;
}
Ok(())
}
fn main() {
if let Err(err) = main_with_result() {
eprintln!("Error: {}", err);
std::process::exit(1);
}
} | enum Error {
Io(io::Error),
PwHash(pwhash::error::Error),
Str(String),
} | random_line_split |
main.rs | extern crate clap;
extern crate pwhash;
extern crate termios;
extern crate users;
use clap::{App, Arg};
use std::error;
use std::fmt;
use std::fs::File;
use std::io::{self, BufRead, BufReader, Write};
use std::path::Path;
use termios::{tcsetattr, Termios};
#[derive(Debug)]
enum Error {
Io(io::Error),
PwHash(pwhash::error::Error),
Str(String),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::Io(err)
}
}
impl From<pwhash::error::Error> for Error {
fn from(err: pwhash::error::Error) -> Error {
Error::PwHash(err)
}
}
impl From<String> for Error {
fn from(err: String) -> Error {
Error::Str(err)
}
}
impl From<&str> for Error {
fn from(err: &str) -> Error {
Error::Str(err.to_owned())
}
}
impl fmt::Display for Error {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::PwHash(ref err) => write!(f, "PwHash error: {}", err),
Error::Str(ref err) => f.write_str(err),
}
}
}
impl error::Error for Error {
fn cause(&self) -> Option<&dyn error::Error> {
match *self {
Error::Io(ref err) => Some(err),
Error::PwHash(ref err) => Some(err),
Error::Str(_) => None,
}
}
}
/// Prompt the password of a user
fn prompt_password() -> Result<String, Error> {
// Disable ECHO but echo the new line character
let initial_term = Termios::from_fd(0)?;
let mut term = initial_term;
term.c_lflag &=!termios::ECHO;
term.c_lflag |= termios::ECHONL;
tcsetattr(0, termios::TCSANOW, &term)?;
let mut password_line = String::new();
eprint!("Password: ");
let result = io::stderr()
.flush()
.and_then(|_| io::stdin().read_line(&mut password_line));
// Reset the initial terminal before returning a failure
tcsetattr(0, termios::TCSANOW, &initial_term)?;
result?;
Ok(password_line
.trim_end_matches(|c| c == '\r' || c == '\n')
.to_string())
}
/// Check a password using a `/etc/shadow` file
fn check_password_in_shadow<P: AsRef<Path>>(
shadow_path: P,
user: &str,
password_opt: Option<&str>,
) -> Result<(), Error> {
let mut is_found = false;
let mut prompted_password = None;
let file = File::open(shadow_path)?;
let file_buffer = BufReader::new(&file);
for line_result in file_buffer.lines() {
let line = line_result?;
let fields: Vec<&str> = line.split(':').collect();
if fields.len() >= 2 && fields[0] == user {
is_found = true;
let password_hash = fields[1];
if password_hash == "" || password_hash == "x" || password_hash.starts_with("!") {
println!("Ignoring hash {:?} for {}", password_hash, user);
continue;
}
println!("Found hash for {}: {:?}", user, password_hash);
// Prompt the user for a password if there was none provided
let password = match password_opt {
Some(p) => p,
None => {
if prompted_password.is_none() {
prompted_password = Some(prompt_password()?);
}
prompted_password.as_ref().unwrap()
}
};
// TODO: use a secure hash comparison function, which is constant-time
if pwhash::unix::crypt(password, password_hash)? == password_hash {
println!("The password is correct :)");
return Ok(());
}
}
}
if!is_found {
return Err(Error::Str("user not found in shadow file".to_owned()));
} else {
return Err(Error::Str("incorrect password".to_owned()));
}
}
/// Check a password using `unix_chkpwd` helper
///
/// The source code of the helper is
/// [on GitHub](https://github.com/linux-pam/linux-pam/blob/v1.3.1/modules/pam_unix/unix_chkpwd.c)
fn check_password_with_helper(user: &str, password_opt: Option<&str>) -> Result<(), Error> {
// Find unix_chkpwd
let mut unix_chkpwd_path_opt = None;
for path_dir in &["/bin", "/sbin", "/usr/bin", "/usr/sbin"] {
let path = path_dir.to_string() + "/unix_chkpwd";
if std::fs::metadata(&path).is_ok() {
unix_chkpwd_path_opt = Some(path);
break;
}
}
let unix_chkpwd_path = unix_chkpwd_path_opt.ok_or("unable to find unix_chkpwd helper")?;
println!("Using helper {}", unix_chkpwd_path);
let prompted_password;
let password = match password_opt {
Some(p) => p,
None => {
prompted_password = prompt_password()?;
prompted_password.as_ref()
}
};
let mut child = std::process::Command::new(unix_chkpwd_path)
.args(&[user, "nullok"])
.current_dir("/")
.stdin(std::process::Stdio::piped())
.spawn()?;
{
let stdin = child.stdin.as_mut().unwrap();
stdin.write_all(password.as_bytes())?;
stdin.write_all(&[0])?;
}
let exit_status = child.wait()?;
if!exit_status.success() {
if exit_status.code() == Some(7) {
return Err(Error::Str("incorrect password".to_owned()));
} else {
return Err(Error::Str(format!("unknown exit status ({})", exit_status)));
}
}
println!("The password is correct :)");
Ok(())
}
fn main_with_result() -> Result<(), Error> {
let matches = App::new("CheckLinuxPass")
.version("0.1.0")
.author("Nicolas Iooss")
.about("Check a password on a Linux system")
.arg(
Arg::with_name("user")
.takes_value(true)
.help("name of the user to check the password"),
)
.arg(
Arg::with_name("password")
.short("p")
.long("password")
.takes_value(true)
.help("password to test"),
)
.arg(
Arg::with_name("shadow_file")
.short("s")
.long("shadow")
.takes_value(true)
.help("use a shadow file to test the password"),
)
.get_matches();
let current_username;
let username: &str = match matches.value_of("user") {
Some(u) => u,
None => {
current_username =
users::get_current_username().ok_or("unable to get the current user name")?;
current_username
.to_str()
.ok_or("unable to convert the current user name to str")?
}
};
let password_opt = matches.value_of("password");
if let Some(shadow_path) = matches.value_of("shadow_file") {
// Parse /etc/shadow in search for the user
check_password_in_shadow(shadow_path, &username, password_opt)?;
} else {
check_password_with_helper(&username, password_opt)?;
}
Ok(())
}
fn main() {
if let Err(err) = main_with_result() {
eprintln!("Error: {}", err);
std::process::exit(1);
}
}
| fmt | identifier_name |
extrapolatable.rs | //
// This file is part of zero_sum.
//
// zero_sum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// zero_sum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with zero_sum. If not, see <http://www.gnu.org/licenses/>.
//
// Copyright 2016-2017 Chris Foster
//
use ply::Ply;
/// Provides the possible plies from a given state.
pub trait Extrapolatable<P> where
P: Ply {
/// Returns a list of plies that should be considered from the current state. The search | fn extrapolate(&self) -> Vec<P>;
} | /// system does not assume that all plies returned are correct. | random_line_split |
redis-to-sqs.rs | #[tokio::main]
async fn | () -> Result<(), anyhow::Error> {
use redis::Commands as _;
use rusoto_sqs::Sqs as _;
let config = encoder::load_config()?;
let redis_client = redis::Client::open(config.redis.url)?;
let mut conn = redis_client.get_connection()?;
let sqs_client = rusoto_sqs::SqsClient::new(Default::default());
loop {
let job: Vec<String> = conn.blpop(&["jobs", "0"], 5)?;
if job.is_empty() {
break;
}
let fname = job.into_iter().nth(1).unwrap();
println!("Enqueue {}", fname);
sqs_client
.send_message(rusoto_sqs::SendMessageRequest {
queue_url: config.sqs.queue_url.clone(),
message_body: fname,
..Default::default()
})
.await?;
}
Ok(())
}
| main | identifier_name |
redis-to-sqs.rs | #[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
use redis::Commands as _;
use rusoto_sqs::Sqs as _;
let config = encoder::load_config()?;
let redis_client = redis::Client::open(config.redis.url)?;
let mut conn = redis_client.get_connection()?;
let sqs_client = rusoto_sqs::SqsClient::new(Default::default());
loop {
let job: Vec<String> = conn.blpop(&["jobs", "0"], 5)?;
if job.is_empty() {
break;
}
let fname = job.into_iter().nth(1).unwrap();
println!("Enqueue {}", fname); | .send_message(rusoto_sqs::SendMessageRequest {
queue_url: config.sqs.queue_url.clone(),
message_body: fname,
..Default::default()
})
.await?;
}
Ok(())
} |
sqs_client | random_line_split |
redis-to-sqs.rs | #[tokio::main]
async fn main() -> Result<(), anyhow::Error> | message_body: fname,
..Default::default()
})
.await?;
}
Ok(())
}
| {
use redis::Commands as _;
use rusoto_sqs::Sqs as _;
let config = encoder::load_config()?;
let redis_client = redis::Client::open(config.redis.url)?;
let mut conn = redis_client.get_connection()?;
let sqs_client = rusoto_sqs::SqsClient::new(Default::default());
loop {
let job: Vec<String> = conn.blpop(&["jobs", "0"], 5)?;
if job.is_empty() {
break;
}
let fname = job.into_iter().nth(1).unwrap();
println!("Enqueue {}", fname);
sqs_client
.send_message(rusoto_sqs::SendMessageRequest {
queue_url: config.sqs.queue_url.clone(), | identifier_body |
redis-to-sqs.rs | #[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
use redis::Commands as _;
use rusoto_sqs::Sqs as _;
let config = encoder::load_config()?;
let redis_client = redis::Client::open(config.redis.url)?;
let mut conn = redis_client.get_connection()?;
let sqs_client = rusoto_sqs::SqsClient::new(Default::default());
loop {
let job: Vec<String> = conn.blpop(&["jobs", "0"], 5)?;
if job.is_empty() |
let fname = job.into_iter().nth(1).unwrap();
println!("Enqueue {}", fname);
sqs_client
.send_message(rusoto_sqs::SendMessageRequest {
queue_url: config.sqs.queue_url.clone(),
message_body: fname,
..Default::default()
})
.await?;
}
Ok(())
}
| {
break;
} | conditional_block |
weight.rs | use super::Scorer;
use crate::core::SegmentReader;
use crate::query::Explanation;
use crate::{DocId, Score, TERMINATED};
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
pub(crate) fn for_each_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
callback: &mut dyn FnMut(DocId, Score),
) {
let mut doc = scorer.doc();
while doc!= TERMINATED {
callback(doc, scorer.score());
doc = scorer.advance();
}
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
while doc!= TERMINATED {
let score = scorer.score();
if score > threshold {
threshold = callback(doc, score);
}
doc = scorer.advance();
}
}
/// A Weight is the specialization of a Query
/// for a given set of segments.
///
/// See [`Query`](./trait.Query.html).
pub trait Weight: Send + Sync +'static {
/// Returns the scorer for the given segment.
///
/// `boost` is a multiplier to apply to the score.
///
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
/// Returns an `Explanation` for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number documents within the given `SegmentReader`.
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0)?;
if let Some(alive_bitset) = reader.alive_bitset() {
Ok(scorer.count(alive_bitset))
} else |
}
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
fn for_each(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_scorer(scorer.as_mut(), callback);
Ok(())
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: Score,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}
}
| {
Ok(scorer.count_including_deleted())
} | conditional_block |
weight.rs | use super::Scorer;
use crate::core::SegmentReader;
use crate::query::Explanation;
use crate::{DocId, Score, TERMINATED};
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
pub(crate) fn for_each_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
callback: &mut dyn FnMut(DocId, Score),
) |
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
while doc!= TERMINATED {
let score = scorer.score();
if score > threshold {
threshold = callback(doc, score);
}
doc = scorer.advance();
}
}
/// A Weight is the specialization of a Query
/// for a given set of segments.
///
/// See [`Query`](./trait.Query.html).
pub trait Weight: Send + Sync +'static {
/// Returns the scorer for the given segment.
///
/// `boost` is a multiplier to apply to the score.
///
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
/// Returns an `Explanation` for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number documents within the given `SegmentReader`.
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0)?;
if let Some(alive_bitset) = reader.alive_bitset() {
Ok(scorer.count(alive_bitset))
} else {
Ok(scorer.count_including_deleted())
}
}
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
fn for_each(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_scorer(scorer.as_mut(), callback);
Ok(())
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: Score,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}
}
| {
let mut doc = scorer.doc();
while doc != TERMINATED {
callback(doc, scorer.score());
doc = scorer.advance();
}
} | identifier_body |
weight.rs | use super::Scorer;
use crate::core::SegmentReader;
use crate::query::Explanation;
use crate::{DocId, Score, TERMINATED};
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
pub(crate) fn for_each_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
callback: &mut dyn FnMut(DocId, Score),
) {
let mut doc = scorer.doc();
while doc!= TERMINATED {
callback(doc, scorer.score());
doc = scorer.advance();
}
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
while doc!= TERMINATED {
let score = scorer.score();
if score > threshold {
threshold = callback(doc, score);
}
doc = scorer.advance();
}
}
/// A Weight is the specialization of a Query
/// for a given set of segments.
///
/// See [`Query`](./trait.Query.html).
pub trait Weight: Send + Sync +'static {
/// Returns the scorer for the given segment.
///
/// `boost` is a multiplier to apply to the score.
///
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
/// Returns an `Explanation` for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number documents within the given `SegmentReader`.
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0)?;
if let Some(alive_bitset) = reader.alive_bitset() {
Ok(scorer.count(alive_bitset))
} else {
Ok(scorer.count_including_deleted())
}
}
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
fn | (
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_scorer(scorer.as_mut(), callback);
Ok(())
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: Score,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}
}
| for_each | identifier_name |
weight.rs | use super::Scorer;
use crate::core::SegmentReader;
use crate::query::Explanation;
use crate::{DocId, Score, TERMINATED};
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
pub(crate) fn for_each_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
callback: &mut dyn FnMut(DocId, Score),
) {
let mut doc = scorer.doc();
while doc!= TERMINATED {
callback(doc, scorer.score());
doc = scorer.advance();
}
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer +?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
while doc!= TERMINATED {
let score = scorer.score();
if score > threshold {
threshold = callback(doc, score);
}
doc = scorer.advance();
}
}
/// A Weight is the specialization of a Query
/// for a given set of segments.
///
/// See [`Query`](./trait.Query.html).
pub trait Weight: Send + Sync +'static {
/// Returns the scorer for the given segment.
///
/// `boost` is a multiplier to apply to the score.
///
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
/// Returns an `Explanation` for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number documents within the given `SegmentReader`.
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0)?;
if let Some(alive_bitset) = reader.alive_bitset() {
Ok(scorer.count(alive_bitset))
} else {
Ok(scorer.count_including_deleted())
}
}
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
fn for_each(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_scorer(scorer.as_mut(), callback);
Ok(())
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector. | /// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: Score,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}
} | /// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement | random_line_split |
objects-owned-object-borrowed-method-headerless.rs | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test invoked `&self` methods on owned objects where the values
// closed over do not contain managed values, and thus the boxes do
// not have headers.
trait FooTrait {
fn foo(&self) -> uint;
}
struct BarStruct {
x: uint
}
impl FooTrait for BarStruct {
fn foo(&self) -> uint {
self.x
}
}
pub fn main() {
let foos: Vec<Box<FooTrait>> = vec!(
box BarStruct{ x: 0 } as Box<FooTrait>,
box BarStruct{ x: 1 } as Box<FooTrait>,
box BarStruct{ x: 2 } as Box<FooTrait>
);
for i in range(0u, foos.len()) {
assert_eq!(i, foos[i].foo());
}
} | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | random_line_split |
|
objects-owned-object-borrowed-method-headerless.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test invoked `&self` methods on owned objects where the values
// closed over do not contain managed values, and thus the boxes do
// not have headers.
trait FooTrait {
fn foo(&self) -> uint;
}
struct | {
x: uint
}
impl FooTrait for BarStruct {
fn foo(&self) -> uint {
self.x
}
}
pub fn main() {
let foos: Vec<Box<FooTrait>> = vec!(
box BarStruct{ x: 0 } as Box<FooTrait>,
box BarStruct{ x: 1 } as Box<FooTrait>,
box BarStruct{ x: 2 } as Box<FooTrait>
);
for i in range(0u, foos.len()) {
assert_eq!(i, foos[i].foo());
}
}
| BarStruct | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.