file_name (large_string, lengths 4 to 69) | prefix (large_string, lengths 0 to 26.7k) | suffix (large_string, lengths 0 to 24.8k) | middle (large_string, lengths 0 to 2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
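A note on reading the rows that follow: based on how the columns are laid out (and on how fill-in-the-middle datasets are typically built), each row appears to hold one source file split into a `prefix`, a masked `middle` span, and a `suffix`, with `fim_type` naming the kind of span that was masked; concatenating prefix + middle + suffix should reproduce the original file. The sketch below illustrates that reading only — the `FimRow` struct, its field semantics, and the example values are hypothetical and are not part of the dataset itself.

```rust
// Hypothetical sketch of one dataset row; field meanings are inferred
// from the table layout, not taken from any published schema.
struct FimRow {
    file_name: String, // e.g. "physics.rs"
    prefix: String,    // text before the masked span
    suffix: String,    // text after the masked span
    middle: String,    // the masked span itself (the completion target)
    fim_type: String,  // one of the 4 span classes, e.g. "identifier_body"
}

// Reassemble the original file text: a model is shown `prefix` and
// `suffix` and is asked to predict `middle`.
fn reassemble(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

fn main() {
    // Toy example values, chosen only to show the reassembly.
    let row = FimRow {
        file_name: "example.rs".to_string(),
        prefix: "fn double(x: i32) -> i32 ".to_string(),
        suffix: "\n".to_string(),
        middle: "{ x * 2 }".to_string(),
        fim_type: "identifier_body".to_string(),
    };
    assert_eq!(reassemble(&row), "fn double(x: i32) -> i32 { x * 2 }\n");
    println!("{} reassembled OK", row.file_name);
}
```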
physics.rs | use super::{Unit, ToUnit};
#[deriving(Eq, Ord)]
pub struct Vec2 {
pub x: Unit,
pub y: Unit,
}
impl Vec2 {
pub fn new<A: ToUnit, B: ToUnit>(x: A, y: B) -> Vec2 {
Vec2 {
x: x.to_unit(),
y: y.to_unit(),
}
}
pub fn norm(&self) -> Vec2 {
let len = self.length();
Vec2::new(self.x / len, self.y / len)
}
pub fn length(&self) -> Unit {
let x = self.x.val();
let y = self.y.val();
Unit((x * x + y * y).sqrt())
}
}
pub trait ToVec2 {
fn to_vec(&self) -> Vec2;
}
impl ToVec2 for Vec2 {
fn to_vec(&self) -> Vec2 {
*self
}
}
impl Add<Vec2, Vec2> for Vec2 {
fn add(&self, rhs: &Vec2) -> Vec2 {
Vec2 {
x: self.x + rhs.x,
y: self.y + rhs.y,
}
}
}
impl<T: ToUnit> Mul<T, Vec2> for Vec2 {
fn mul(&self, rhs: &T) -> Vec2 {
let a = rhs.to_unit();
Vec2 {
x: self.x * a,
y: self.y * a,
}
}
}
/// x axis from left to right and y axis from top to bottom
pub struct AABB {
pub center: Vec2,
pub size: Vec2,
}
impl AABB {
pub fn new<A: ToUnit, B: ToUnit, C: ToUnit, D: ToUnit>(x: A, y: B, w: C, h: D) -> AABB |
pub fn transform(&self, offset: Vec2) -> AABB {
AABB {
center: self.center + offset,
size: self.size,
}
}
pub fn is_collided_with(&self, other: &AABB) -> bool {
self.right() >= other.left() &&
self.left() <= other.right() &&
self.top() <= other.bottom() &&
self.bottom() >= other.top()
}
pub fn left(&self) -> Unit {
self.center.x - self.size.x / 2.0
}
pub fn right(&self) -> Unit {
self.center.x + self.size.x / 2.0
}
pub fn top(&self) -> Unit {
self.center.y - self.size.y / 2.0
}
pub fn bottom(&self) -> Unit {
self.center.y + self.size.y / 2.0
}
pub fn size(&self) -> Vec2 {
self.size
}
}
#[deriving(Eq, Ord)]
pub struct MS(pub uint);
impl MS {
pub fn val(&self) -> uint {
let MS(a) = *self;
a
}
}
impl ToUnit for MS {
fn to_unit(&self) -> Unit {
let MS(a) = *self;
Unit(a as f32)
}
}
impl Sub<MS, MS> for MS {
fn sub(&self, rhs: &MS) -> MS {
MS(self.val() - rhs.val())
}
}
impl Add<MS, MS> for MS {
fn add(&self, rhs: &MS) -> MS {
MS(self.val() + rhs.val())
}
}
| {
AABB {
center: Vec2::new(x, y),
size: Vec2::new(w, h),
}
} | identifier_body |
physics.rs | use super::{Unit, ToUnit};
#[deriving(Eq, Ord)]
pub struct Vec2 {
pub x: Unit,
pub y: Unit,
}
impl Vec2 {
pub fn new<A: ToUnit, B: ToUnit>(x: A, y: B) -> Vec2 {
Vec2 {
x: x.to_unit(),
y: y.to_unit(),
}
}
pub fn norm(&self) -> Vec2 {
let len = self.length();
Vec2::new(self.x / len, self.y / len)
}
pub fn length(&self) -> Unit {
let x = self.x.val();
let y = self.y.val();
Unit((x * x + y * y).sqrt())
}
}
pub trait ToVec2 {
fn to_vec(&self) -> Vec2;
}
impl ToVec2 for Vec2 {
fn to_vec(&self) -> Vec2 {
*self
}
}
impl Add<Vec2, Vec2> for Vec2 {
fn add(&self, rhs: &Vec2) -> Vec2 {
Vec2 {
x: self.x + rhs.x,
y: self.y + rhs.y,
}
}
}
impl<T: ToUnit> Mul<T, Vec2> for Vec2 {
fn mul(&self, rhs: &T) -> Vec2 {
let a = rhs.to_unit();
Vec2 {
x: self.x * a,
y: self.y * a, | }
}
}
/// x axis from left to right and y axis from top to bottom
pub struct AABB {
pub center: Vec2,
pub size: Vec2,
}
impl AABB {
pub fn new<A: ToUnit, B: ToUnit, C: ToUnit, D: ToUnit>(x: A, y: B, w: C, h: D) -> AABB {
AABB {
center: Vec2::new(x, y),
size: Vec2::new(w, h),
}
}
pub fn transform(&self, offset: Vec2) -> AABB {
AABB {
center: self.center + offset,
size: self.size,
}
}
pub fn is_collided_with(&self, other: &AABB) -> bool {
self.right() >= other.left() &&
self.left() <= other.right() &&
self.top() <= other.bottom() &&
self.bottom() >= other.top()
}
pub fn left(&self) -> Unit {
self.center.x - self.size.x / 2.0
}
pub fn right(&self) -> Unit {
self.center.x + self.size.x / 2.0
}
pub fn top(&self) -> Unit {
self.center.y - self.size.y / 2.0
}
pub fn bottom(&self) -> Unit {
self.center.y + self.size.y / 2.0
}
pub fn size(&self) -> Vec2 {
self.size
}
}
#[deriving(Eq, Ord)]
pub struct MS(pub uint);
impl MS {
pub fn val(&self) -> uint {
let MS(a) = *self;
a
}
}
impl ToUnit for MS {
fn to_unit(&self) -> Unit {
let MS(a) = *self;
Unit(a as f32)
}
}
impl Sub<MS, MS> for MS {
fn sub(&self, rhs: &MS) -> MS {
MS(self.val() - rhs.val())
}
}
impl Add<MS, MS> for MS {
fn add(&self, rhs: &MS) -> MS {
MS(self.val() + rhs.val())
}
} | random_line_split |
|
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![deny(unsafe_code)]
extern crate app_units;
extern crate atomic_refcell;
#[macro_use]
extern crate bitflags;
extern crate canvas_traits;
extern crate euclid;
extern crate fnv;
extern crate gfx;
extern crate gfx_traits;
#[macro_use] extern crate html5ever;
extern crate ipc_channel;
extern crate libc;
#[macro_use]
extern crate log;
extern crate malloc_size_of;
extern crate msg;
extern crate net_traits;
extern crate ordered_float;
extern crate parking_lot;
extern crate profile_traits;
#[macro_use]
extern crate range;
extern crate rayon;
extern crate script_layout_interface;
extern crate script_traits;
#[macro_use] extern crate serde;
extern crate serde_json;
extern crate servo_arc;
extern crate servo_atoms;
extern crate servo_config;
extern crate servo_geometry;
extern crate servo_url;
extern crate smallvec;
extern crate style;
extern crate style_traits;
extern crate unicode_bidi;
extern crate unicode_script;
extern crate webrender_api;
extern crate xi_unicode;
#[macro_use]
pub mod layout_debug;
pub mod animation;
mod block;
pub mod construct;
pub mod context;
pub mod data;
pub mod display_list;
mod flex;
mod floats;
pub mod flow;
mod flow_list; | mod generated_content;
pub mod incremental;
mod inline;
mod linked_list;
mod list_item;
mod model;
mod multicol;
pub mod opaque_node;
pub mod parallel;
mod persistent_list;
pub mod query;
pub mod sequential;
mod table;
mod table_caption;
mod table_cell;
mod table_colgroup;
mod table_row;
mod table_rowgroup;
mod table_wrapper;
mod text;
pub mod traversal;
pub mod wrapper;
// For unit tests:
pub use fragment::Fragment;
pub use fragment::SpecificFragmentInfo;
pub use self::data::LayoutData;
// We can't use servo_arc for everything in layout, because the Flow stuff uses
// weak references.
use servo_arc::Arc as ServoArc; | pub mod flow_ref;
mod fragment; | random_line_split |
query.rs | //! Values computed by queries that use MIR.
use crate::mir::{abstract_const, Body, Promoted};
use crate::ty::{self, Ty, TyCtxt};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::vec_map::VecMap;
use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_index::bit_set::BitMatrix;
use rustc_index::vec::IndexVec;
use rustc_middle::ty::OpaqueTypeKey;
use rustc_span::Span;
use rustc_target::abi::VariantIdx;
use smallvec::SmallVec;
use std::cell::Cell;
use std::fmt::{self, Debug};
use super::{Field, SourceInfo};
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationKind {
/// Unsafe operation outside `unsafe`.
General,
/// Unsafe operation in an `unsafe fn` but outside an `unsafe` block.
/// Has to be handled as a lint for backwards compatibility.
UnsafeFn,
}
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationDetails {
CallToUnsafeFunction,
UseOfInlineAssembly,
InitializingTypeWith,
CastOfPointerToInt,
UseOfMutableStatic,
UseOfExternStatic,
DerefOfRawPointer,
AssignToDroppingUnionField,
AccessToUnionField,
MutationOfLayoutConstrainedField,
BorrowOfLayoutConstrainedField,
CallToFunctionWith,
}
impl UnsafetyViolationDetails {
pub fn description_and_note(&self) -> (&'static str, &'static str) {
use UnsafetyViolationDetails::*;
match self {
CallToUnsafeFunction => (
"call to unsafe function",
"consult the function's documentation for information on how to avoid undefined \
behavior",
),
UseOfInlineAssembly => (
"use of inline assembly",
"inline assembly is entirely unchecked and can cause undefined behavior",
),
InitializingTypeWith => (
"initializing type with `rustc_layout_scalar_valid_range` attr",
"initializing a layout restricted type's field with a value outside the valid \
range is undefined behavior",
),
CastOfPointerToInt => {
("cast of pointer to int", "casting pointers to integers in constants")
}
UseOfMutableStatic => (
"use of mutable static",
"mutable statics can be mutated by multiple threads: aliasing violations or data \
races will cause undefined behavior",
),
UseOfExternStatic => (
"use of extern static",
"extern statics are not controlled by the Rust type system: invalid data, \
aliasing violations or data races will cause undefined behavior",
),
DerefOfRawPointer => (
"dereference of raw pointer",
"raw pointers may be null, dangling or unaligned; they can violate aliasing rules \
and cause data races: all of these are undefined behavior",
),
AssignToDroppingUnionField => (
"assignment to union field that might need dropping",
"the previous content of the field will be dropped, which causes undefined \
behavior if the field was not properly initialized",
),
AccessToUnionField => (
"access to union field",
"the field may not be properly initialized: using uninitialized data will cause \
undefined behavior",
),
MutationOfLayoutConstrainedField => (
"mutation of layout constrained field",
"mutating layout constrained fields cannot statically be checked for valid values",
),
BorrowOfLayoutConstrainedField => (
"borrow of layout constrained field with interior mutability",
"references to fields of layout constrained fields lose the constraints. Coupled \
with interior mutability, the field can be changed to invalid values",
),
CallToFunctionWith => (
"call to function with `#[target_feature]`",
"can only be called if the required target features are available",
),
}
}
}
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub struct UnsafetyViolation {
pub source_info: SourceInfo,
pub lint_root: hir::HirId,
pub kind: UnsafetyViolationKind,
pub details: UnsafetyViolationDetails,
}
#[derive(Clone, TyEncodable, TyDecodable, HashStable, Debug)]
pub struct UnsafetyCheckResult {
/// Violations that are propagated *upwards* from this function.
pub violations: Lrc<[UnsafetyViolation]>,
/// `unsafe` blocks in this function, along with whether they are used. This is
/// used for the "unused_unsafe" lint.
pub unsafe_blocks: Lrc<[(hir::HirId, bool)]>,
}
rustc_index::newtype_index! {
pub struct GeneratorSavedLocal {
derive [HashStable]
DEBUG_FORMAT = "_{}",
}
}
/// The layout of generator state.
#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub struct GeneratorLayout<'tcx> {
/// The type of every local stored inside the generator.
pub field_tys: IndexVec<GeneratorSavedLocal, Ty<'tcx>>,
/// Which of the above fields are in each variant. Note that one field may
/// be stored in multiple variants.
pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
/// The source that led to each variant being created (usually, a yield or
/// await).
pub variant_source_info: IndexVec<VariantIdx, SourceInfo>,
/// Which saved locals are storage-live at the same time. Locals that do not
/// have conflicts with each other are allowed to overlap in the computed
/// layout.
pub storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
}
impl Debug for GeneratorLayout<'_> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
/// Prints an iterator of (key, value) tuples as a map.
struct MapPrinter<'a, K, V>(Cell<Option<Box<dyn Iterator<Item = (K, V)> + 'a>>>);
impl<'a, K, V> MapPrinter<'a, K, V> {
fn new(iter: impl Iterator<Item = (K, V)> + 'a) -> Self |
}
impl<'a, K: Debug, V: Debug> Debug for MapPrinter<'a, K, V> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_map().entries(self.0.take().unwrap()).finish()
}
}
/// Prints the generator variant name.
struct GenVariantPrinter(VariantIdx);
impl From<VariantIdx> for GenVariantPrinter {
fn from(idx: VariantIdx) -> Self {
GenVariantPrinter(idx)
}
}
impl Debug for GenVariantPrinter {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let variant_name = ty::GeneratorSubsts::variant_name(self.0);
if fmt.alternate() {
write!(fmt, "{:9}({:?})", variant_name, self.0)
} else {
write!(fmt, "{}", variant_name)
}
}
}
/// Forces its contents to print in regular mode instead of alternate mode.
struct OneLinePrinter<T>(T);
impl<T: Debug> Debug for OneLinePrinter<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "{:?}", self.0)
}
}
fmt.debug_struct("GeneratorLayout")
.field("field_tys", &MapPrinter::new(self.field_tys.iter_enumerated()))
.field(
"variant_fields",
&MapPrinter::new(
self.variant_fields
.iter_enumerated()
.map(|(k, v)| (GenVariantPrinter(k), OneLinePrinter(v))),
),
)
.field("storage_conflicts", &self.storage_conflicts)
.finish()
}
}
#[derive(Debug, TyEncodable, TyDecodable, HashStable)]
pub struct BorrowCheckResult<'tcx> {
/// All the opaque types that are restricted to concrete types
/// by this function. Unlike the value in `TypeckResults`, this has
/// unerased regions.
pub concrete_opaque_types: VecMap<OpaqueTypeKey<'tcx>, Ty<'tcx>>,
pub closure_requirements: Option<ClosureRegionRequirements<'tcx>>,
pub used_mut_upvars: SmallVec<[Field; 8]>,
}
/// The result of the `mir_const_qualif` query.
///
/// Each field (except `error_occured`) corresponds to an implementer of the `Qualif` trait in
/// `rustc_mir/src/transform/check_consts/qualifs.rs`. See that file for more information on each
/// `Qualif`.
#[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)]
pub struct ConstQualifs {
pub has_mut_interior: bool,
pub needs_drop: bool,
pub custom_eq: bool,
pub error_occured: Option<ErrorReported>,
}
/// After we borrow check a closure, we are left with various
/// requirements that we have inferred between the free regions that
/// appear in the closure's signature or on its field types. These
/// requirements are then verified and proved by the closure's
/// creating function. This struct encodes those requirements.
///
/// The requirements are listed as being between various `RegionVid`. The 0th
/// region refers to `'static`; subsequent region vids refer to the free
/// regions that appear in the closure (or generator's) type, in order of
/// appearance. (This numbering is actually defined by the `UniversalRegions`
/// struct in the NLL region checker. See for example
/// `UniversalRegions::closure_mapping`.) Note the free regions in the
/// closure's signature and captures are erased.
///
/// Example: If type check produces a closure with the closure substs:
///
/// ```text
/// ClosureSubsts = [
/// 'a, // From the parent.
/// 'b,
/// i8, // the "closure kind"
/// for<'x> fn(&'<erased> &'x u32) -> &'x u32, // the "closure signature"
/// &'<erased> String, // some upvar
/// ]
/// ```
///
/// We would "renumber" each free region to a unique vid, as follows:
///
/// ```text
/// ClosureSubsts = [
/// '1, // From the parent.
/// '2,
/// i8, // the "closure kind"
/// for<'x> fn(&'3 &'x u32) -> &'x u32, // the "closure signature"
/// &'4 String, // some upvar
/// ]
/// ```
///
/// Now the code might impose a requirement like `'1: '2`. When an
/// instance of the closure is created, the corresponding free regions
/// can be extracted from its type and constrained to have the given
/// outlives relationship.
///
/// In some cases, we have to record outlives requirements between types and
/// regions as well. In that case, if those types include any regions, those
/// regions are recorded using their external names (`ReStatic`,
/// `ReEarlyBound`, `ReFree`). We use these because in a query response we
/// cannot use `ReVar` (which is what we use internally within the rest of the
/// NLL code).
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub struct ClosureRegionRequirements<'tcx> {
/// The number of external regions defined on the closure. In our
/// example above, it would be 3 -- one for `'static`, then `'1`
/// and `'2`. This is just used for a sanity check later on, to
/// make sure that the number of regions we see at the callsite
/// matches.
pub num_external_vids: usize,
/// Requirements between the various free regions defined in
/// indices.
pub outlives_requirements: Vec<ClosureOutlivesRequirement<'tcx>>,
}
/// Indicates an outlives-constraint between a type or between two
/// free regions declared on the closure.
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub struct ClosureOutlivesRequirement<'tcx> {
// This region or type...
pub subject: ClosureOutlivesSubject<'tcx>,
//... must outlive this one.
pub outlived_free_region: ty::RegionVid,
// If not, report an error here...
pub blame_span: Span,
//... due to this reason.
pub category: ConstraintCategory,
}
/// Outlives-constraints can be categorized to determine whether and why they
/// are interesting (for error reporting). Order of variants indicates sort
/// order of the category, thereby influencing diagnostic output.
///
/// See also `rustc_mir::borrow_check::constraints`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable)]
pub enum ConstraintCategory {
Return(ReturnConstraint),
Yield,
UseAsConst,
UseAsStatic,
TypeAnnotation,
Cast,
/// A constraint that came from checking the body of a closure.
///
/// We try to get the category that the closure used when reporting this.
ClosureBounds,
CallArgument,
CopyBound,
SizedBound,
Assignment,
OpaqueType,
ClosureUpvar(hir::HirId),
/// A "boring" constraint (caused by the given location) is one that
/// the user probably doesn't want to see described in diagnostics,
/// because it is kind of an artifact of the type system setup.
/// Example: `x = Foo { field: y }` technically creates
/// intermediate regions representing the "type of `Foo { field: y
/// }`", and data flows from `y` into those variables, but they
/// are not very interesting. The assignment into `x` on the other
/// hand might be.
Boring,
// Boring and applicable everywhere.
BoringNoLocation,
/// A constraint that doesn't correspond to anything the user sees.
Internal,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable)]
pub enum ReturnConstraint {
Normal,
ClosureUpvar(hir::HirId),
}
/// The subject of a `ClosureOutlivesRequirement` -- that is, the thing
/// that must outlive some region.
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum ClosureOutlivesSubject<'tcx> {
/// Subject is a type, typically a type parameter, but could also
/// be a projection. Indicates a requirement like `T: 'a` being
/// passed to the caller, where the type here is `T`.
///
/// The type here is guaranteed not to contain any free regions at
/// present.
Ty(Ty<'tcx>),
/// Subject is a free region from the closure. Indicates a requirement
/// like `'a: 'b` being passed to the caller; the region here is `'a`.
Region(ty::RegionVid),
}
/// The constituent parts of an ADT or array.
#[derive(Copy, Clone, Debug, HashStable)]
pub struct DestructuredConst<'tcx> {
pub variant: Option<VariantIdx>,
pub fields: &'tcx [&'tcx ty::Const<'tcx>],
}
/// Coverage information summarized from a MIR if instrumented for source code coverage (see
/// compiler option `-Zinstrument-coverage`). This information is generated by the
/// `InstrumentCoverage` MIR pass and can be retrieved via the `coverageinfo` query.
#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable)]
pub struct CoverageInfo {
/// The total number of coverage region counters added to the MIR `Body`.
pub num_counters: u32,
/// The total number of coverage region counter expressions added to the MIR `Body`.
pub num_expressions: u32,
}
/// Shims which make dealing with `WithOptConstParam` easier.
///
/// For more information on why this is needed, consider looking
/// at the docs for `WithOptConstParam` itself.
impl<'tcx> TyCtxt<'tcx> {
#[inline]
pub fn mir_const_qualif_opt_const_arg(
self,
def: ty::WithOptConstParam<LocalDefId>,
) -> ConstQualifs {
if let Some(param_did) = def.const_param_did {
self.mir_const_qualif_const_arg((def.did, param_did))
} else {
self.mir_const_qualif(def.did)
}
}
#[inline]
pub fn promoted_mir_opt_const_arg(
self,
def: ty::WithOptConstParam<DefId>,
) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
if let Some((did, param_did)) = def.as_const_arg() {
self.promoted_mir_of_const_arg((did, param_did))
} else {
self.promoted_mir(def.did)
}
}
#[inline]
pub fn mir_for_ctfe_opt_const_arg(self, def: ty::WithOptConstParam<DefId>) -> &'tcx Body<'tcx> {
if let Some((did, param_did)) = def.as_const_arg() {
self.mir_for_ctfe_of_const_arg((did, param_did))
} else {
self.mir_for_ctfe(def.did)
}
}
#[inline]
pub fn mir_abstract_const_opt_const_arg(
self,
def: ty::WithOptConstParam<DefId>,
) -> Result<Option<&'tcx [abstract_const::Node<'tcx>]>, ErrorReported> {
if let Some((did, param_did)) = def.as_const_arg() {
self.mir_abstract_const_of_const_arg((did, param_did))
} else {
self.mir_abstract_const(def.did)
}
}
}
| {
Self(Cell::new(Some(Box::new(iter))))
} | identifier_body |
query.rs | //! Values computed by queries that use MIR.
use crate::mir::{abstract_const, Body, Promoted};
use crate::ty::{self, Ty, TyCtxt};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::vec_map::VecMap;
use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_index::bit_set::BitMatrix;
use rustc_index::vec::IndexVec;
use rustc_middle::ty::OpaqueTypeKey;
use rustc_span::Span;
use rustc_target::abi::VariantIdx;
use smallvec::SmallVec;
use std::cell::Cell;
use std::fmt::{self, Debug};
use super::{Field, SourceInfo};
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationKind {
/// Unsafe operation outside `unsafe`.
General,
/// Unsafe operation in an `unsafe fn` but outside an `unsafe` block.
/// Has to be handled as a lint for backwards compatibility.
UnsafeFn,
}
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationDetails {
CallToUnsafeFunction,
UseOfInlineAssembly,
InitializingTypeWith,
CastOfPointerToInt,
UseOfMutableStatic,
UseOfExternStatic,
DerefOfRawPointer,
AssignToDroppingUnionField,
AccessToUnionField,
MutationOfLayoutConstrainedField,
BorrowOfLayoutConstrainedField,
CallToFunctionWith,
}
impl UnsafetyViolationDetails {
pub fn description_and_note(&self) -> (&'static str, &'static str) {
use UnsafetyViolationDetails::*;
match self {
CallToUnsafeFunction => (
"call to unsafe function",
"consult the function's documentation for information on how to avoid undefined \
behavior",
),
UseOfInlineAssembly => (
"use of inline assembly",
"inline assembly is entirely unchecked and can cause undefined behavior",
),
InitializingTypeWith => (
"initializing type with `rustc_layout_scalar_valid_range` attr",
"initializing a layout restricted type's field with a value outside the valid \
range is undefined behavior",
),
CastOfPointerToInt => {
("cast of pointer to int", "casting pointers to integers in constants")
}
UseOfMutableStatic => (
"use of mutable static",
"mutable statics can be mutated by multiple threads: aliasing violations or data \
races will cause undefined behavior",
),
UseOfExternStatic => (
"use of extern static",
"extern statics are not controlled by the Rust type system: invalid data, \
aliasing violations or data races will cause undefined behavior",
),
DerefOfRawPointer => (
"dereference of raw pointer",
"raw pointers may be null, dangling or unaligned; they can violate aliasing rules \
and cause data races: all of these are undefined behavior",
),
AssignToDroppingUnionField => (
"assignment to union field that might need dropping",
"the previous content of the field will be dropped, which causes undefined \
behavior if the field was not properly initialized",
),
AccessToUnionField => (
"access to union field",
"the field may not be properly initialized: using uninitialized data will cause \
undefined behavior",
),
MutationOfLayoutConstrainedField => (
"mutation of layout constrained field",
"mutating layout constrained fields cannot statically be checked for valid values",
),
BorrowOfLayoutConstrainedField => (
"borrow of layout constrained field with interior mutability",
"references to fields of layout constrained fields lose the constraints. Coupled \
with interior mutability, the field can be changed to invalid values",
),
CallToFunctionWith => (
"call to function with `#[target_feature]`",
"can only be called if the required target features are available",
),
}
}
}
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub struct UnsafetyViolation {
pub source_info: SourceInfo,
pub lint_root: hir::HirId,
pub kind: UnsafetyViolationKind,
pub details: UnsafetyViolationDetails,
}
#[derive(Clone, TyEncodable, TyDecodable, HashStable, Debug)]
pub struct UnsafetyCheckResult {
/// Violations that are propagated *upwards* from this function.
pub violations: Lrc<[UnsafetyViolation]>,
/// `unsafe` blocks in this function, along with whether they are used. This is
/// used for the "unused_unsafe" lint.
pub unsafe_blocks: Lrc<[(hir::HirId, bool)]>,
}
rustc_index::newtype_index! {
pub struct GeneratorSavedLocal {
derive [HashStable]
DEBUG_FORMAT = "_{}",
}
}
/// The layout of generator state.
#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub struct GeneratorLayout<'tcx> {
/// The type of every local stored inside the generator.
pub field_tys: IndexVec<GeneratorSavedLocal, Ty<'tcx>>,
/// Which of the above fields are in each variant. Note that one field may
/// be stored in multiple variants.
pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
/// The source that led to each variant being created (usually, a yield or
/// await).
pub variant_source_info: IndexVec<VariantIdx, SourceInfo>,
/// Which saved locals are storage-live at the same time. Locals that do not
/// have conflicts with each other are allowed to overlap in the computed
/// layout.
pub storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
}
impl Debug for GeneratorLayout<'_> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
/// Prints an iterator of (key, value) tuples as a map.
struct MapPrinter<'a, K, V>(Cell<Option<Box<dyn Iterator<Item = (K, V)> + 'a>>>);
impl<'a, K, V> MapPrinter<'a, K, V> {
fn new(iter: impl Iterator<Item = (K, V)> + 'a) -> Self {
Self(Cell::new(Some(Box::new(iter))))
}
}
impl<'a, K: Debug, V: Debug> Debug for MapPrinter<'a, K, V> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_map().entries(self.0.take().unwrap()).finish()
}
}
/// Prints the generator variant name.
struct GenVariantPrinter(VariantIdx);
impl From<VariantIdx> for GenVariantPrinter {
fn from(idx: VariantIdx) -> Self {
GenVariantPrinter(idx)
}
}
impl Debug for GenVariantPrinter {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let variant_name = ty::GeneratorSubsts::variant_name(self.0);
if fmt.alternate() {
write!(fmt, "{:9}({:?})", variant_name, self.0)
} else {
write!(fmt, "{}", variant_name)
}
}
}
/// Forces its contents to print in regular mode instead of alternate mode.
struct OneLinePrinter<T>(T);
impl<T: Debug> Debug for OneLinePrinter<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "{:?}", self.0)
}
}
fmt.debug_struct("GeneratorLayout")
.field("field_tys", &MapPrinter::new(self.field_tys.iter_enumerated()))
.field(
"variant_fields",
&MapPrinter::new(
self.variant_fields
.iter_enumerated()
.map(|(k, v)| (GenVariantPrinter(k), OneLinePrinter(v))),
),
)
.field("storage_conflicts", &self.storage_conflicts)
.finish()
}
}
#[derive(Debug, TyEncodable, TyDecodable, HashStable)]
pub struct BorrowCheckResult<'tcx> {
/// All the opaque types that are restricted to concrete types
/// by this function. Unlike the value in `TypeckResults`, this has
/// unerased regions.
pub concrete_opaque_types: VecMap<OpaqueTypeKey<'tcx>, Ty<'tcx>>,
pub closure_requirements: Option<ClosureRegionRequirements<'tcx>>,
pub used_mut_upvars: SmallVec<[Field; 8]>,
}
/// The result of the `mir_const_qualif` query.
///
/// Each field (except `error_occured`) corresponds to an implementer of the `Qualif` trait in
/// `rustc_mir/src/transform/check_consts/qualifs.rs`. See that file for more information on each
/// `Qualif`.
#[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)]
pub struct ConstQualifs {
pub has_mut_interior: bool,
pub needs_drop: bool,
pub custom_eq: bool,
pub error_occured: Option<ErrorReported>,
}
/// After we borrow check a closure, we are left with various
/// requirements that we have inferred between the free regions that
/// appear in the closure's signature or on its field types. These
/// requirements are then verified and proved by the closure's
/// creating function. This struct encodes those requirements.
///
/// The requirements are listed as being between various `RegionVid`. The 0th
/// region refers to `'static`; subsequent region vids refer to the free
/// regions that appear in the closure (or generator's) type, in order of
/// appearance. (This numbering is actually defined by the `UniversalRegions`
/// struct in the NLL region checker. See for example
/// `UniversalRegions::closure_mapping`.) Note the free regions in the
/// closure's signature and captures are erased.
///
/// Example: If type check produces a closure with the closure substs:
///
/// ```text
/// ClosureSubsts = [
/// 'a, // From the parent.
/// 'b,
/// i8, // the "closure kind"
/// for<'x> fn(&'<erased> &'x u32) -> &'x u32, // the "closure signature"
/// &'<erased> String, // some upvar
/// ]
/// ```
///
/// We would "renumber" each free region to a unique vid, as follows:
///
/// ```text
/// ClosureSubsts = [
/// '1, // From the parent.
/// '2,
/// i8, // the "closure kind"
/// for<'x> fn(&'3 &'x u32) -> &'x u32, // the "closure signature"
/// &'4 String, // some upvar
/// ]
/// ```
///
/// Now the code might impose a requirement like `'1: '2`. When an
/// instance of the closure is created, the corresponding free regions
/// can be extracted from its type and constrained to have the given
/// outlives relationship.
///
/// In some cases, we have to record outlives requirements between types and
/// regions as well. In that case, if those types include any regions, those
/// regions are recorded using their external names (`ReStatic`,
/// `ReEarlyBound`, `ReFree`). We use these because in a query response we
/// cannot use `ReVar` (which is what we use internally within the rest of the
/// NLL code).
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub struct ClosureRegionRequirements<'tcx> {
/// The number of external regions defined on the closure. In our
/// example above, it would be 3 -- one for `'static`, then `'1`
/// and `'2`. This is just used for a sanity check later on, to
/// make sure that the number of regions we see at the callsite
/// matches.
pub num_external_vids: usize,
/// Requirements between the various free regions defined in
/// indices.
pub outlives_requirements: Vec<ClosureOutlivesRequirement<'tcx>>,
}
/// Indicates an outlives-constraint between a type or between two
/// free regions declared on the closure.
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub struct ClosureOutlivesRequirement<'tcx> {
// This region or type...
pub subject: ClosureOutlivesSubject<'tcx>,
//... must outlive this one.
pub outlived_free_region: ty::RegionVid,
// If not, report an error here...
pub blame_span: Span,
//... due to this reason.
pub category: ConstraintCategory,
}
/// Outlives-constraints can be categorized to determine whether and why they
/// are interesting (for error reporting). Order of variants indicates sort
/// order of the category, thereby influencing diagnostic output.
///
/// See also `rustc_mir::borrow_check::constraints`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable)]
pub enum ConstraintCategory {
Return(ReturnConstraint),
Yield,
UseAsConst,
UseAsStatic,
TypeAnnotation,
Cast,
/// A constraint that came from checking the body of a closure.
///
/// We try to get the category that the closure used when reporting this.
ClosureBounds,
CallArgument,
CopyBound,
SizedBound,
Assignment,
OpaqueType,
ClosureUpvar(hir::HirId),
/// A "boring" constraint (caused by the given location) is one that
/// the user probably doesn't want to see described in diagnostics,
/// because it is kind of an artifact of the type system setup.
/// Example: `x = Foo { field: y }` technically creates
/// intermediate regions representing the "type of `Foo { field: y
/// }`", and data flows from `y` into those variables, but they
/// are not very interesting. The assignment into `x` on the other
/// hand might be.
Boring,
// Boring and applicable everywhere.
BoringNoLocation,
/// A constraint that doesn't correspond to anything the user sees.
Internal,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable)]
pub enum ReturnConstraint {
Normal,
ClosureUpvar(hir::HirId),
}
/// The subject of a `ClosureOutlivesRequirement` -- that is, the thing
/// that must outlive some region.
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum ClosureOutlivesSubject<'tcx> {
/// Subject is a type, typically a type parameter, but could also
/// be a projection. Indicates a requirement like `T: 'a` being
/// passed to the caller, where the type here is `T`.
///
/// The type here is guaranteed not to contain any free regions at
/// present.
Ty(Ty<'tcx>),
/// Subject is a free region from the closure. Indicates a requirement
/// like `'a: 'b` being passed to the caller; the region here is `'a`.
Region(ty::RegionVid),
}
/// The constituent parts of an ADT or array.
#[derive(Copy, Clone, Debug, HashStable)]
pub struct DestructuredConst<'tcx> {
pub variant: Option<VariantIdx>,
pub fields: &'tcx [&'tcx ty::Const<'tcx>],
}
/// Coverage information summarized from a MIR if instrumented for source code coverage (see
/// compiler option `-Zinstrument-coverage`). This information is generated by the
/// `InstrumentCoverage` MIR pass and can be retrieved via the `coverageinfo` query.
#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable)]
pub struct CoverageInfo {
/// The total number of coverage region counters added to the MIR `Body`.
pub num_counters: u32,
/// The total number of coverage region counter expressions added to the MIR `Body`.
pub num_expressions: u32,
}
/// Shims which make dealing with `WithOptConstParam` easier.
///
/// For more information on why this is needed, consider looking
/// at the docs for `WithOptConstParam` itself.
impl<'tcx> TyCtxt<'tcx> {
#[inline]
pub fn mir_const_qualif_opt_const_arg(
self,
def: ty::WithOptConstParam<LocalDefId>,
) -> ConstQualifs {
if let Some(param_did) = def.const_param_did {
self.mir_const_qualif_const_arg((def.did, param_did))
} else {
self.mir_const_qualif(def.did)
}
}
#[inline]
pub fn promoted_mir_opt_const_arg(
self,
def: ty::WithOptConstParam<DefId>,
) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
if let Some((did, param_did)) = def.as_const_arg() {
self.promoted_mir_of_const_arg((did, param_did))
} else {
self.promoted_mir(def.did)
}
}
#[inline]
pub fn | (self, def: ty::WithOptConstParam<DefId>) -> &'tcx Body<'tcx> {
if let Some((did, param_did)) = def.as_const_arg() {
self.mir_for_ctfe_of_const_arg((did, param_did))
} else {
self.mir_for_ctfe(def.did)
}
}
#[inline]
pub fn mir_abstract_const_opt_const_arg(
self,
def: ty::WithOptConstParam<DefId>,
) -> Result<Option<&'tcx [abstract_const::Node<'tcx>]>, ErrorReported> {
if let Some((did, param_did)) = def.as_const_arg() {
self.mir_abstract_const_of_const_arg((did, param_did))
} else {
self.mir_abstract_const(def.did)
}
}
}
| mir_for_ctfe_opt_const_arg | identifier_name |
query.rs | //! Values computed by queries that use MIR.
use crate::mir::{abstract_const, Body, Promoted};
use crate::ty::{self, Ty, TyCtxt};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::vec_map::VecMap;
use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_index::bit_set::BitMatrix;
use rustc_index::vec::IndexVec;
use rustc_middle::ty::OpaqueTypeKey;
use rustc_span::Span;
use rustc_target::abi::VariantIdx;
use smallvec::SmallVec;
use std::cell::Cell;
use std::fmt::{self, Debug};
use super::{Field, SourceInfo};
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationKind {
/// Unsafe operation outside `unsafe`.
General,
/// Unsafe operation in an `unsafe fn` but outside an `unsafe` block.
/// Has to be handled as a lint for backwards compatibility.
UnsafeFn,
}
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationDetails {
CallToUnsafeFunction,
UseOfInlineAssembly,
InitializingTypeWith,
CastOfPointerToInt,
UseOfMutableStatic,
UseOfExternStatic,
DerefOfRawPointer,
AssignToDroppingUnionField, |
impl UnsafetyViolationDetails {
pub fn description_and_note(&self) -> (&'static str, &'static str) {
use UnsafetyViolationDetails::*;
match self {
CallToUnsafeFunction => (
"call to unsafe function",
"consult the function's documentation for information on how to avoid undefined \
behavior",
),
UseOfInlineAssembly => (
"use of inline assembly",
"inline assembly is entirely unchecked and can cause undefined behavior",
),
InitializingTypeWith => (
"initializing type with `rustc_layout_scalar_valid_range` attr",
"initializing a layout restricted type's field with a value outside the valid \
range is undefined behavior",
),
CastOfPointerToInt => {
("cast of pointer to int", "casting pointers to integers in constants")
}
UseOfMutableStatic => (
"use of mutable static",
"mutable statics can be mutated by multiple threads: aliasing violations or data \
races will cause undefined behavior",
),
UseOfExternStatic => (
"use of extern static",
"extern statics are not controlled by the Rust type system: invalid data, \
aliasing violations or data races will cause undefined behavior",
),
DerefOfRawPointer => (
"dereference of raw pointer",
"raw pointers may be null, dangling or unaligned; they can violate aliasing rules \
and cause data races: all of these are undefined behavior",
),
AssignToDroppingUnionField => (
"assignment to union field that might need dropping",
"the previous content of the field will be dropped, which causes undefined \
behavior if the field was not properly initialized",
),
AccessToUnionField => (
"access to union field",
"the field may not be properly initialized: using uninitialized data will cause \
undefined behavior",
),
MutationOfLayoutConstrainedField => (
"mutation of layout constrained field",
"mutating layout constrained fields cannot statically be checked for valid values",
),
BorrowOfLayoutConstrainedField => (
"borrow of layout constrained field with interior mutability",
"references to fields of layout constrained fields lose the constraints. Coupled \
with interior mutability, the field can be changed to invalid values",
),
CallToFunctionWith => (
"call to function with `#[target_feature]`",
"can only be called if the required target features are available",
),
}
}
}
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub struct UnsafetyViolation {
pub source_info: SourceInfo,
pub lint_root: hir::HirId,
pub kind: UnsafetyViolationKind,
pub details: UnsafetyViolationDetails,
}
#[derive(Clone, TyEncodable, TyDecodable, HashStable, Debug)]
pub struct UnsafetyCheckResult {
/// Violations that are propagated *upwards* from this function.
pub violations: Lrc<[UnsafetyViolation]>,
/// `unsafe` blocks in this function, along with whether they are used. This is
/// used for the "unused_unsafe" lint.
pub unsafe_blocks: Lrc<[(hir::HirId, bool)]>,
}
rustc_index::newtype_index! {
pub struct GeneratorSavedLocal {
derive [HashStable]
DEBUG_FORMAT = "_{}",
}
}
/// The layout of generator state.
#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub struct GeneratorLayout<'tcx> {
/// The type of every local stored inside the generator.
pub field_tys: IndexVec<GeneratorSavedLocal, Ty<'tcx>>,
/// Which of the above fields are in each variant. Note that one field may
/// be stored in multiple variants.
pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
/// The source that led to each variant being created (usually, a yield or
/// await).
pub variant_source_info: IndexVec<VariantIdx, SourceInfo>,
/// Which saved locals are storage-live at the same time. Locals that do not
/// have conflicts with each other are allowed to overlap in the computed
/// layout.
pub storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
}
impl Debug for GeneratorLayout<'_> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
/// Prints an iterator of (key, value) tuples as a map.
struct MapPrinter<'a, K, V>(Cell<Option<Box<dyn Iterator<Item = (K, V)> + 'a>>>);
impl<'a, K, V> MapPrinter<'a, K, V> {
fn new(iter: impl Iterator<Item = (K, V)> + 'a) -> Self {
Self(Cell::new(Some(Box::new(iter))))
}
}
impl<'a, K: Debug, V: Debug> Debug for MapPrinter<'a, K, V> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_map().entries(self.0.take().unwrap()).finish()
}
}
/// Prints the generator variant name.
struct GenVariantPrinter(VariantIdx);
impl From<VariantIdx> for GenVariantPrinter {
fn from(idx: VariantIdx) -> Self {
GenVariantPrinter(idx)
}
}
impl Debug for GenVariantPrinter {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let variant_name = ty::GeneratorSubsts::variant_name(self.0);
if fmt.alternate() {
write!(fmt, "{:9}({:?})", variant_name, self.0)
} else {
write!(fmt, "{}", variant_name)
}
}
}
/// Forces its contents to print in regular mode instead of alternate mode.
struct OneLinePrinter<T>(T);
impl<T: Debug> Debug for OneLinePrinter<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "{:?}", self.0)
}
}
fmt.debug_struct("GeneratorLayout")
.field("field_tys", &MapPrinter::new(self.field_tys.iter_enumerated()))
.field(
"variant_fields",
&MapPrinter::new(
self.variant_fields
.iter_enumerated()
.map(|(k, v)| (GenVariantPrinter(k), OneLinePrinter(v))),
),
)
.field("storage_conflicts", &self.storage_conflicts)
.finish()
}
}
#[derive(Debug, TyEncodable, TyDecodable, HashStable)]
pub struct BorrowCheckResult<'tcx> {
/// All the opaque types that are restricted to concrete types
/// by this function. Unlike the value in `TypeckResults`, this has
/// unerased regions.
pub concrete_opaque_types: VecMap<OpaqueTypeKey<'tcx>, Ty<'tcx>>,
pub closure_requirements: Option<ClosureRegionRequirements<'tcx>>,
pub used_mut_upvars: SmallVec<[Field; 8]>,
}
/// The result of the `mir_const_qualif` query.
///
/// Each field (except `error_occured`) corresponds to an implementer of the `Qualif` trait in
/// `rustc_mir/src/transform/check_consts/qualifs.rs`. See that file for more information on each
/// `Qualif`.
#[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)]
pub struct ConstQualifs {
pub has_mut_interior: bool,
pub needs_drop: bool,
pub custom_eq: bool,
pub error_occured: Option<ErrorReported>,
}
/// After we borrow check a closure, we are left with various
/// requirements that we have inferred between the free regions that
/// appear in the closure's signature or on its field types. These
/// requirements are then verified and proved by the closure's
/// creating function. This struct encodes those requirements.
///
/// The requirements are listed as being between various `RegionVid`. The 0th
/// region refers to `'static`; subsequent region vids refer to the free
/// regions that appear in the closure (or generator's) type, in order of
/// appearance. (This numbering is actually defined by the `UniversalRegions`
/// struct in the NLL region checker. See for example
/// `UniversalRegions::closure_mapping`.) Note the free regions in the
/// closure's signature and captures are erased.
///
/// Example: If type check produces a closure with the closure substs:
///
/// ```text
/// ClosureSubsts = [
/// 'a, // From the parent.
/// 'b,
/// i8, // the "closure kind"
/// for<'x> fn(&'<erased> &'x u32) -> &'x u32, // the "closure signature"
/// &'<erased> String, // some upvar
/// ]
/// ```
///
/// We would "renumber" each free region to a unique vid, as follows:
///
/// ```text
/// ClosureSubsts = [
/// '1, // From the parent.
/// '2,
/// i8, // the "closure kind"
/// for<'x> fn(&'3 &'x u32) -> &'x u32, // the "closure signature"
/// &'4 String, // some upvar
/// ]
/// ```
///
/// Now the code might impose a requirement like `'1: '2`. When an
/// instance of the closure is created, the corresponding free regions
/// can be extracted from its type and constrained to have the given
/// outlives relationship.
///
/// In some cases, we have to record outlives requirements between types and
/// regions as well. In that case, if those types include any regions, those
/// regions are recorded using their external names (`ReStatic`,
/// `ReEarlyBound`, `ReFree`). We use these because in a query response we
/// cannot use `ReVar` (which is what we use internally within the rest of the
/// NLL code).
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub struct ClosureRegionRequirements<'tcx> {
/// The number of external regions defined on the closure. In our
/// example above, it would be 3 -- one for `'static`, then `'1`
/// and `'2`. This is just used for a sanity check later on, to
/// make sure that the number of regions we see at the callsite
/// matches.
pub num_external_vids: usize,
/// Requirements between the various free regions defined in
/// indices.
pub outlives_requirements: Vec<ClosureOutlivesRequirement<'tcx>>,
}
/// Indicates an outlives-constraint between a type or between two
/// free regions declared on the closure.
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub struct ClosureOutlivesRequirement<'tcx> {
// This region or type...
pub subject: ClosureOutlivesSubject<'tcx>,
//... must outlive this one.
pub outlived_free_region: ty::RegionVid,
// If not, report an error here...
pub blame_span: Span,
//... due to this reason.
pub category: ConstraintCategory,
}
/// Outlives-constraints can be categorized to determine whether and why they
/// are interesting (for error reporting). Order of variants indicates sort
/// order of the category, thereby influencing diagnostic output.
///
/// See also `rustc_mir::borrow_check::constraints`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable)]
pub enum ConstraintCategory {
Return(ReturnConstraint),
Yield,
UseAsConst,
UseAsStatic,
TypeAnnotation,
Cast,
/// A constraint that came from checking the body of a closure.
///
/// We try to get the category that the closure used when reporting this.
ClosureBounds,
CallArgument,
CopyBound,
SizedBound,
Assignment,
OpaqueType,
ClosureUpvar(hir::HirId),
/// A "boring" constraint (caused by the given location) is one that
/// the user probably doesn't want to see described in diagnostics,
/// because it is kind of an artifact of the type system setup.
/// Example: `x = Foo { field: y }` technically creates
/// intermediate regions representing the "type of `Foo { field: y
/// }`", and data flows from `y` into those variables, but they
/// are not very interesting. The assignment into `x` on the other
/// hand might be.
Boring,
// Boring and applicable everywhere.
BoringNoLocation,
/// A constraint that doesn't correspond to anything the user sees.
Internal,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable)]
pub enum ReturnConstraint {
Normal,
ClosureUpvar(hir::HirId),
}
/// The subject of a `ClosureOutlivesRequirement` -- that is, the thing
/// that must outlive some region.
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum ClosureOutlivesSubject<'tcx> {
/// Subject is a type, typically a type parameter, but could also
/// be a projection. Indicates a requirement like `T: 'a` being
/// passed to the caller, where the type here is `T`.
///
/// The type here is guaranteed not to contain any free regions at
/// present.
Ty(Ty<'tcx>),
/// Subject is a free region from the closure. Indicates a requirement
/// like `'a: 'b` being passed to the caller; the region here is `'a`.
Region(ty::RegionVid),
}
/// The constituent parts of an ADT or array.
#[derive(Copy, Clone, Debug, HashStable)]
pub struct DestructuredConst<'tcx> {
pub variant: Option<VariantIdx>,
pub fields: &'tcx [&'tcx ty::Const<'tcx>],
}
/// Coverage information summarized from a MIR if instrumented for source code coverage (see
/// compiler option `-Zinstrument-coverage`). This information is generated by the
/// `InstrumentCoverage` MIR pass and can be retrieved via the `coverageinfo` query.
#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable)]
pub struct CoverageInfo {
/// The total number of coverage region counters added to the MIR `Body`.
pub num_counters: u32,
/// The total number of coverage region counter expressions added to the MIR `Body`.
pub num_expressions: u32,
}
/// Shims which make dealing with `WithOptConstParam` easier.
///
/// For more information on why this is needed, consider looking
/// at the docs for `WithOptConstParam` itself.
impl<'tcx> TyCtxt<'tcx> {
#[inline]
pub fn mir_const_qualif_opt_const_arg(
self,
def: ty::WithOptConstParam<LocalDefId>,
) -> ConstQualifs {
if let Some(param_did) = def.const_param_did {
self.mir_const_qualif_const_arg((def.did, param_did))
} else {
self.mir_const_qualif(def.did)
}
}
#[inline]
pub fn promoted_mir_opt_const_arg(
self,
def: ty::WithOptConstParam<DefId>,
) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
if let Some((did, param_did)) = def.as_const_arg() {
self.promoted_mir_of_const_arg((did, param_did))
} else {
self.promoted_mir(def.did)
}
}
#[inline]
pub fn mir_for_ctfe_opt_const_arg(self, def: ty::WithOptConstParam<DefId>) -> &'tcx Body<'tcx> {
if let Some((did, param_did)) = def.as_const_arg() {
self.mir_for_ctfe_of_const_arg((did, param_did))
} else {
self.mir_for_ctfe(def.did)
}
}
#[inline]
pub fn mir_abstract_const_opt_const_arg(
self,
def: ty::WithOptConstParam<DefId>,
) -> Result<Option<&'tcx [abstract_const::Node<'tcx>]>, ErrorReported> {
if let Some((did, param_did)) = def.as_const_arg() {
self.mir_abstract_const_of_const_arg((did, param_did))
} else {
self.mir_abstract_const(def.did)
}
}
} | AccessToUnionField,
MutationOfLayoutConstrainedField,
BorrowOfLayoutConstrainedField,
CallToFunctionWith,
} | random_line_split |
add_enchantment.rs | use rune_vm::Rune;
use rustc_serialize::json;
use game_state::GameState;
use minion_card::UID;
use hlua;
#[derive(RustcDecodable, RustcEncodable, Clone)]
pub struct AddEnchantment {
target_uid: UID,
source_uid: UID,
}
impl AddEnchantment {
pub fn new(source_uid: UID, target_uid: UID) -> AddEnchantment {
AddEnchantment {
source_uid: source_uid,
target_uid: target_uid,
}
}
}
implement_for_lua!(AddEnchantment, |mut _metatable| {});
| fn execute_rune(&self, mut game_state: &mut GameState) {
game_state.get_mut_minion(self.target_uid).unwrap().add_enchantment(self.source_uid);
}
fn can_see(&self, _controller: UID, _game_state: &GameState) -> bool {
return true;
}
fn to_json(&self) -> String {
json::encode(self).unwrap().replace("{", "{\"runeType\":\"AddEnchantment\",")
}
fn into_box(&self) -> Box<Rune> {
Box::new(self.clone())
}
} | impl Rune for AddEnchantment { | random_line_split |
add_enchantment.rs | use rune_vm::Rune;
use rustc_serialize::json;
use game_state::GameState;
use minion_card::UID;
use hlua;
#[derive(RustcDecodable, RustcEncodable, Clone)]
pub struct AddEnchantment {
target_uid: UID,
source_uid: UID,
}
impl AddEnchantment {
pub fn | (source_uid: UID, target_uid: UID) -> AddEnchantment {
AddEnchantment {
source_uid: source_uid,
target_uid: target_uid,
}
}
}
implement_for_lua!(AddEnchantment, |mut _metatable| {});
impl Rune for AddEnchantment {
fn execute_rune(&self, mut game_state: &mut GameState) {
game_state.get_mut_minion(self.target_uid).unwrap().add_enchantment(self.source_uid);
}
fn can_see(&self, _controller: UID, _game_state: &GameState) -> bool {
return true;
}
fn to_json(&self) -> String {
json::encode(self).unwrap().replace("{", "{\"runeType\":\"AddEnchantment\",")
}
fn into_box(&self) -> Box<Rune> {
Box::new(self.clone())
}
}
| new | identifier_name |
texture_swap.rs | extern crate rand;
extern crate piston_window;
extern crate image as im;
use piston_window::*;
fn main() {
let texture_count = 1024;
let frames = 200;
let size = 32.0;
let mut window: PistonWindow = WindowSettings::new("piston", [1024; 2]).build().unwrap();
let mut texture_context = TextureContext {
factory: window.factory.clone(),
encoder: window.factory.create_command_buffer().into()
};
let textures = {
(0..texture_count).map(|_| {
let mut img = im::ImageBuffer::new(2, 2);
for x in 0..2 {
for y in 0..2 {
img.put_pixel(x, y,
im::Rgba([rand::random(), rand::random(), rand::random(), 255]));
}
}
Texture::from_image(
&mut texture_context,
&img,
&TextureSettings::new()
).unwrap()
}).collect::<Vec<Texture<_>>>()
};
let mut positions = (0..texture_count)
.map(|_| (rand::random(), rand::random()))
.collect::<Vec<(f64, f64)>>();
| if counter > frames { break; }
}
window.draw_2d(&e, |c, g, _| {
clear([0.0, 0.0, 0.0, 1.0], g);
for p in &mut positions {
let (x, y) = *p;
*p = (x + (rand::random::<f64>() - 0.5) * 0.01,
y + (rand::random::<f64>() - 0.5) * 0.01);
}
for i in 0..texture_count {
let p = positions[i];
image(&textures[i], c.transform
.trans(p.0 * 1024.0, p.1 * 1024.0).zoom(size), g);
}
});
}
} | let mut counter = 0;
window.set_bench_mode(true);
while let Some(e) = window.next() {
if e.render_args().is_some() {
counter += 1; | random_line_split |
texture_swap.rs | extern crate rand;
extern crate piston_window;
extern crate image as im;
use piston_window::*;
fn main() {
let texture_count = 1024;
let frames = 200;
let size = 32.0;
let mut window: PistonWindow = WindowSettings::new("piston", [1024; 2]).build().unwrap();
let mut texture_context = TextureContext {
factory: window.factory.clone(),
encoder: window.factory.create_command_buffer().into()
};
let textures = {
(0..texture_count).map(|_| {
let mut img = im::ImageBuffer::new(2, 2);
for x in 0..2 {
for y in 0..2 {
img.put_pixel(x, y,
im::Rgba([rand::random(), rand::random(), rand::random(), 255]));
}
}
Texture::from_image(
&mut texture_context,
&img,
&TextureSettings::new()
).unwrap()
}).collect::<Vec<Texture<_>>>()
};
let mut positions = (0..texture_count)
.map(|_| (rand::random(), rand::random()))
.collect::<Vec<(f64, f64)>>();
let mut counter = 0;
window.set_bench_mode(true);
while let Some(e) = window.next() {
if e.render_args().is_some() |
window.draw_2d(&e, |c, g, _| {
clear([0.0, 0.0, 0.0, 1.0], g);
for p in &mut positions {
let (x, y) = *p;
*p = (x + (rand::random::<f64>() - 0.5) * 0.01,
y + (rand::random::<f64>() - 0.5) * 0.01);
}
for i in 0..texture_count {
let p = positions[i];
image(&textures[i], c.transform
.trans(p.0 * 1024.0, p.1 * 1024.0).zoom(size), g);
}
});
}
}
| {
counter += 1;
if counter > frames { break; }
} | conditional_block |
texture_swap.rs | extern crate rand;
extern crate piston_window;
extern crate image as im;
use piston_window::*;
fn | () {
let texture_count = 1024;
let frames = 200;
let size = 32.0;
let mut window: PistonWindow = WindowSettings::new("piston", [1024; 2]).build().unwrap();
let mut texture_context = TextureContext {
factory: window.factory.clone(),
encoder: window.factory.create_command_buffer().into()
};
let textures = {
(0..texture_count).map(|_| {
let mut img = im::ImageBuffer::new(2, 2);
for x in 0..2 {
for y in 0..2 {
img.put_pixel(x, y,
im::Rgba([rand::random(), rand::random(), rand::random(), 255]));
}
}
Texture::from_image(
&mut texture_context,
&img,
&TextureSettings::new()
).unwrap()
}).collect::<Vec<Texture<_>>>()
};
let mut positions = (0..texture_count)
.map(|_| (rand::random(), rand::random()))
.collect::<Vec<(f64, f64)>>();
let mut counter = 0;
window.set_bench_mode(true);
while let Some(e) = window.next() {
if e.render_args().is_some() {
counter += 1;
if counter > frames { break; }
}
window.draw_2d(&e, |c, g, _| {
clear([0.0, 0.0, 0.0, 1.0], g);
for p in &mut positions {
let (x, y) = *p;
*p = (x + (rand::random::<f64>() - 0.5) * 0.01,
y + (rand::random::<f64>() - 0.5) * 0.01);
}
for i in 0..texture_count {
let p = positions[i];
image(&textures[i], c.transform
.trans(p.0 * 1024.0, p.1 * 1024.0).zoom(size), g);
}
});
}
}
| main | identifier_name |
texture_swap.rs | extern crate rand;
extern crate piston_window;
extern crate image as im;
use piston_window::*;
fn main() | Texture::from_image(
&mut texture_context,
&img,
&TextureSettings::new()
).unwrap()
}).collect::<Vec<Texture<_>>>()
};
let mut positions = (0..texture_count)
.map(|_| (rand::random(), rand::random()))
.collect::<Vec<(f64, f64)>>();
let mut counter = 0;
window.set_bench_mode(true);
while let Some(e) = window.next() {
if e.render_args().is_some() {
counter += 1;
if counter > frames { break; }
}
window.draw_2d(&e, |c, g, _| {
clear([0.0, 0.0, 0.0, 1.0], g);
for p in &mut positions {
let (x, y) = *p;
*p = (x + (rand::random::<f64>() - 0.5) * 0.01,
y + (rand::random::<f64>() - 0.5) * 0.01);
}
for i in 0..texture_count {
let p = positions[i];
image(&textures[i], c.transform
.trans(p.0 * 1024.0, p.1 * 1024.0).zoom(size), g);
}
});
}
}
| {
let texture_count = 1024;
let frames = 200;
let size = 32.0;
let mut window: PistonWindow = WindowSettings::new("piston", [1024; 2]).build().unwrap();
let mut texture_context = TextureContext {
factory: window.factory.clone(),
encoder: window.factory.create_command_buffer().into()
};
let textures = {
(0..texture_count).map(|_| {
let mut img = im::ImageBuffer::new(2, 2);
for x in 0..2 {
for y in 0..2 {
img.put_pixel(x, y,
im::Rgba([rand::random(), rand::random(), rand::random(), 255]));
}
} | identifier_body |
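// Illustrative sketch, not part of the texture_swap.rs rows above: the benchmark
// counts render events and stops after `frames`; wrapping the run in
// std::time::Instant turns that counter into an average frame time.
fn report_avg_frame_time(frames: u32, run: impl FnOnce()) {
    let start = std::time::Instant::now();
    run(); // drive the PistonWindow event loop until `counter > frames`
    println!("average frame time: {:?}", start.elapsed() / frames);
}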
mutex.rs | use alloc::boxed::Box;
use core::borrow::{Borrow, BorrowMut};
use core::ops::{Deref, DerefMut};
use core::cell::{Cell, RefCell, RefMut};
use crate::syscall;
use core::marker::Sync;
#[link(name="os_init", kind="static")]
extern "C" {
fn mutex_lock(lock: *mut i32) -> i32;
}
pub enum TryLockResult {
AlreadyLocked,
Poisoned,
}
pub struct Mutex<T:?Sized> {
data: Box<RefCell<T>>,
lock: Cell<i32>,
}
| pub struct LockedResource<'a, T: 'a +?Sized> {
mutex: &'a Mutex<T>,
data_ref: RefMut<'a, T>
}
impl<T> Mutex<T> {
pub fn new(t: T) -> Mutex<T> {
Self {
data: Box::new(RefCell::new(t)),
lock: Cell::new(0),
}
}
}
impl<T:?Sized> Mutex<T> {
pub fn lock(&self) -> LockedResource<'_, T> {
unsafe {
while mutex_lock(self.lock.as_ptr())!= 0 {
syscall::sys_sleep(0);
}
}
LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
}
}
pub fn try_lock(&self) -> Result<LockedResource<'_, T>, TryLockResult> {
unsafe {
if mutex_lock(self.lock.as_ptr())!= 0 {
return Err(TryLockResult::AlreadyLocked)
}
}
Ok(LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
})
}
}
impl<'a, T:?Sized> Deref for LockedResource<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.data_ref.borrow()
}
}
impl<'a, T:?Sized> DerefMut for LockedResource<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data_ref.borrow_mut()
}
}
impl<'a, T:?Sized> Drop for LockedResource<'a, T> {
fn drop(&mut self) {
self.mutex.lock.set(0);
}
} | random_line_split |
|
mutex.rs | use alloc::boxed::Box;
use core::borrow::{Borrow, BorrowMut};
use core::ops::{Deref, DerefMut};
use core::cell::{Cell, RefCell, RefMut};
use crate::syscall;
use core::marker::Sync;
#[link(name="os_init", kind="static")]
extern "C" {
fn mutex_lock(lock: *mut i32) -> i32;
}
pub enum TryLockResult {
AlreadyLocked,
Poisoned,
}
pub struct Mutex<T:?Sized> {
data: Box<RefCell<T>>,
lock: Cell<i32>,
}
pub struct LockedResource<'a, T: 'a +?Sized> {
mutex: &'a Mutex<T>,
data_ref: RefMut<'a, T>
}
impl<T> Mutex<T> {
pub fn new(t: T) -> Mutex<T> {
Self {
data: Box::new(RefCell::new(t)),
lock: Cell::new(0),
}
}
}
impl<T:?Sized> Mutex<T> {
pub fn lock(&self) -> LockedResource<'_, T> {
unsafe {
while mutex_lock(self.lock.as_ptr())!= 0 {
syscall::sys_sleep(0);
}
}
LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
}
}
pub fn try_lock(&self) -> Result<LockedResource<'_, T>, TryLockResult> {
unsafe {
if mutex_lock(self.lock.as_ptr())!= 0 {
return Err(TryLockResult::AlreadyLocked)
}
}
Ok(LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
})
}
}
impl<'a, T:?Sized> Deref for LockedResource<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.data_ref.borrow()
}
}
impl<'a, T:?Sized> DerefMut for LockedResource<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data_ref.borrow_mut()
}
}
impl<'a, T:?Sized> Drop for LockedResource<'a, T> {
fn | (&mut self) {
self.mutex.lock.set(0);
}
} | drop | identifier_name |
mutex.rs | use alloc::boxed::Box;
use core::borrow::{Borrow, BorrowMut};
use core::ops::{Deref, DerefMut};
use core::cell::{Cell, RefCell, RefMut};
use crate::syscall;
use core::marker::Sync;
#[link(name="os_init", kind="static")]
extern "C" {
fn mutex_lock(lock: *mut i32) -> i32;
}
pub enum TryLockResult {
AlreadyLocked,
Poisoned,
}
pub struct Mutex<T:?Sized> {
data: Box<RefCell<T>>,
lock: Cell<i32>,
}
pub struct LockedResource<'a, T: 'a +?Sized> {
mutex: &'a Mutex<T>,
data_ref: RefMut<'a, T>
}
impl<T> Mutex<T> {
pub fn new(t: T) -> Mutex<T> {
Self {
data: Box::new(RefCell::new(t)),
lock: Cell::new(0),
}
}
}
impl<T:?Sized> Mutex<T> {
pub fn lock(&self) -> LockedResource<'_, T> {
unsafe {
while mutex_lock(self.lock.as_ptr())!= 0 {
syscall::sys_sleep(0);
}
}
LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
}
}
pub fn try_lock(&self) -> Result<LockedResource<'_, T>, TryLockResult> {
unsafe {
if mutex_lock(self.lock.as_ptr())!= 0 |
}
Ok(LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
})
}
}
impl<'a, T:?Sized> Deref for LockedResource<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.data_ref.borrow()
}
}
impl<'a, T:?Sized> DerefMut for LockedResource<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data_ref.borrow_mut()
}
}
impl<'a, T:?Sized> Drop for LockedResource<'a, T> {
fn drop(&mut self) {
self.mutex.lock.set(0);
}
} | {
return Err(TryLockResult::AlreadyLocked)
} | conditional_block |
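// Illustrative usage sketch for the Mutex/LockedResource API defined in mutex.rs
// above (assumes a caller that already holds a Mutex<u32>; not part of the original file).
fn bump(counter: &Mutex<u32>) {
    {
        let mut guard = counter.lock(); // spins, yielding via sys_sleep(0), until the lock word is free
        *guard += 1; // DerefMut exposes the protected value
    } // guard dropped here; Drop resets the lock word to 0
    if let Ok(guard) = counter.try_lock() {
        let _snapshot = *guard; // Deref gives read access without blocking
    }
}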
trait-pointers.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags:-Z extra-debug-info |
trait Trait {
fn method(&self) -> int { 0 }
}
struct Struct {
a: int,
b: f64
}
impl Trait for Struct {}
// There is no real test here yet. Just make sure that it compiles without crashing.
fn main() {
let stack_struct = Struct { a:0, b: 1.0 };
let reference: &Trait = &stack_struct as &Trait;
let managed: @Trait = @Struct { a:2, b: 3.0 } as @Trait;
let unique: ~Trait = ~Struct { a:2, b: 3.0 } as ~Trait;
} | // debugger:run
#[allow(unused_variable)]; | random_line_split |
trait-pointers.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags:-Z extra-debug-info
// debugger:run
#[allow(unused_variable)];
trait Trait {
fn method(&self) -> int { 0 }
}
struct | {
a: int,
b: f64
}
impl Trait for Struct {}
// There is no real test here yet. Just make sure that it compiles without crashing.
fn main() {
let stack_struct = Struct { a:0, b: 1.0 };
let reference: &Trait = &stack_struct as &Trait;
let managed: @Trait = @Struct { a:2, b: 3.0 } as @Trait;
let unique: ~Trait = ~Struct { a:2, b: 3.0 } as ~Trait;
}
| Struct | identifier_name |
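// Note: trait-pointers.rs above is pre-1.0 Rust. `int` is today's `isize`,
// `@Trait` was a managed (reference-counted) trait object and `~Trait` an owned one.
// A rough modern equivalent, shown only for orientation:
// let reference: &dyn Trait = &stack_struct;
// let shared: Rc<dyn Trait> = Rc::new(Struct { a: 2, b: 3.0 });
// let unique: Box<dyn Trait> = Box::new(Struct { a: 2, b: 3.0 });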
recursive.rs | fn comb<T>(slice: &[T], k: usize) -> Vec<Vec<T>>
where
T: Copy,
{
// If k == 1, return a vector containing a vector for each element of the slice.
if k == 1 {
return slice.iter().map(|x| vec![*x]).collect::<Vec<Vec<T>>>();
}
// If k is exactly the slice length, return the slice inside a vector.
if k == slice.len() {
return vec![slice.to_vec()];
}
// Make a vector from the first element + all combinations of k - 1 elements of the rest of the slice.
let mut result = comb(&slice[1..], k - 1)
.into_iter()
.map(|x| [&slice[..1], x.as_slice()].concat())
.collect::<Vec<Vec<T>>>();
// Extend this last vector with all the combinations of k elements taken from index 1 onward.
result.extend(comb(&slice[1..], k));
// Return final vector.
result
}
fn main() {
let vec1 = vec![1, 2, 3, 4, 5];
println!("{:?}", comb(&vec1, 3));
let vec2 = vec!["A", "B", "C", "D", "E"];
println!("{:?}", comb(&vec2, 3));
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn five_numbers_choose_three() {
let computed = comb(&[1, 2, 3, 4, 5], 3);
let expected = vec![
vec![1, 2, 3],
vec![1, 2, 4],
vec![1, 2, 5],
vec![1, 3, 4],
vec![1, 3, 5],
vec![1, 4, 5],
vec![2, 3, 4],
vec![2, 3, 5],
vec![2, 4, 5],
vec![3, 4, 5],
];
assert_eq!(computed, expected);
}
#[test]
fn | () {
let computed = comb(&["h", "e", "l", "l", "o"], 2);
let expected = vec![
vec!["h", "e"],
vec!["h", "l"],
vec!["h", "l"],
vec!["h", "o"],
vec!["e", "l"],
vec!["e", "l"],
vec!["e", "o"],
vec!["l", "l"],
vec!["l", "o"],
vec!["l", "o"],
];
assert_eq!(computed, expected);
}
}
| four_letters_choose_two | identifier_name |
recursive.rs | fn comb<T>(slice: &[T], k: usize) -> Vec<Vec<T>>
where
T: Copy,
|
fn main() {
let vec1 = vec![1, 2, 3, 4, 5];
println!("{:?}", comb(&vec1, 3));
let vec2 = vec!["A", "B", "C", "D", "E"];
println!("{:?}", comb(&vec2, 3));
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn five_numbers_choose_three() {
let computed = comb(&[1, 2, 3, 4, 5], 3);
let expected = vec![
vec![1, 2, 3],
vec![1, 2, 4],
vec![1, 2, 5],
vec![1, 3, 4],
vec![1, 3, 5],
vec![1, 4, 5],
vec![2, 3, 4],
vec![2, 3, 5],
vec![2, 4, 5],
vec![3, 4, 5],
];
assert_eq!(computed, expected);
}
#[test]
fn four_letters_choose_two() {
let computed = comb(&["h", "e", "l", "l", "o"], 2);
let expected = vec![
vec!["h", "e"],
vec!["h", "l"],
vec!["h", "l"],
vec!["h", "o"],
vec!["e", "l"],
vec!["e", "l"],
vec!["e", "o"],
vec!["l", "l"],
vec!["l", "o"],
vec!["l", "o"],
];
assert_eq!(computed, expected);
}
}
| {
// If k == 1, return a vector containing a vector for each element of the slice.
if k == 1 {
return slice.iter().map(|x| vec![*x]).collect::<Vec<Vec<T>>>();
}
// If k is exactly the slice length, return the slice inside a vector.
if k == slice.len() {
return vec![slice.to_vec()];
}
// Make a vector from the first element + all combinations of k - 1 elements of the rest of the slice.
let mut result = comb(&slice[1..], k - 1)
.into_iter()
.map(|x| [&slice[..1], x.as_slice()].concat())
.collect::<Vec<Vec<T>>>();
// Extend this last vector with all the combinations of k elements taken from index 1 onward.
result.extend(comb(&slice[1..], k));
// Return final vector.
result
} | identifier_body |
recursive.rs | fn comb<T>(slice: &[T], k: usize) -> Vec<Vec<T>>
where
T: Copy,
{
// If k == 1, return a vector containing a vector for each element of the slice.
if k == 1 {
return slice.iter().map(|x| vec![*x]).collect::<Vec<Vec<T>>>();
}
// If k is exactly the slice length, return the slice inside a vector.
if k == slice.len() {
return vec![slice.to_vec()];
}
// Make a vector from the first element + all combinations of k - 1 elements of the rest of the slice.
let mut result = comb(&slice[1..], k - 1)
.into_iter()
.map(|x| [&slice[..1], x.as_slice()].concat())
.collect::<Vec<Vec<T>>>();
// Extend this last vector with all the combinations of k elements taken from index 1 onward.
result.extend(comb(&slice[1..], k));
// Return final vector.
result
}
fn main() {
let vec1 = vec![1, 2, 3, 4, 5];
println!("{:?}", comb(&vec1, 3));
let vec2 = vec!["A", "B", "C", "D", "E"];
println!("{:?}", comb(&vec2, 3));
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn five_numbers_choose_three() {
let computed = comb(&[1, 2, 3, 4, 5], 3);
let expected = vec![
vec![1, 2, 3],
vec![1, 2, 4],
vec![1, 2, 5],
vec![1, 3, 4],
vec![1, 3, 5],
vec![1, 4, 5],
vec![2, 3, 4],
vec![2, 3, 5],
vec![2, 4, 5],
vec![3, 4, 5],
];
assert_eq!(computed, expected);
}
#[test]
fn four_letters_choose_two() {
let computed = comb(&["h", "e", "l", "l", "o"], 2);
let expected = vec![
vec!["h", "e"],
vec!["h", "l"],
vec!["h", "l"],
vec!["h", "o"], | vec!["e", "o"],
vec!["l", "l"],
vec!["l", "o"],
vec!["l", "o"],
];
assert_eq!(computed, expected);
}
} | vec!["e", "l"],
vec!["e", "l"], | random_line_split |
recursive.rs | fn comb<T>(slice: &[T], k: usize) -> Vec<Vec<T>>
where
T: Copy,
{
// If k == 1, return a vector containing a vector for each element of the slice.
if k == 1 |
// If k is exactly the slice length, return the slice inside a vector.
if k == slice.len() {
return vec![slice.to_vec()];
}
// Make a vector from the first element + all combinations of k - 1 elements of the rest of the slice.
let mut result = comb(&slice[1..], k - 1)
.into_iter()
.map(|x| [&slice[..1], x.as_slice()].concat())
.collect::<Vec<Vec<T>>>();
// Extend this last vector with all the combinations of k elements taken from index 1 onward.
result.extend(comb(&slice[1..], k));
// Return final vector.
result
}
fn main() {
let vec1 = vec![1, 2, 3, 4, 5];
println!("{:?}", comb(&vec1, 3));
let vec2 = vec!["A", "B", "C", "D", "E"];
println!("{:?}", comb(&vec2, 3));
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn five_numbers_choose_three() {
let computed = comb(&[1, 2, 3, 4, 5], 3);
let expected = vec![
vec![1, 2, 3],
vec![1, 2, 4],
vec![1, 2, 5],
vec![1, 3, 4],
vec![1, 3, 5],
vec![1, 4, 5],
vec![2, 3, 4],
vec![2, 3, 5],
vec![2, 4, 5],
vec![3, 4, 5],
];
assert_eq!(computed, expected);
}
#[test]
fn four_letters_choose_two() {
let computed = comb(&["h", "e", "l", "l", "o"], 2);
let expected = vec![
vec!["h", "e"],
vec!["h", "l"],
vec!["h", "l"],
vec!["h", "o"],
vec!["e", "l"],
vec!["e", "l"],
vec!["e", "o"],
vec!["l", "l"],
vec!["l", "o"],
vec!["l", "o"],
];
assert_eq!(computed, expected);
}
}
| {
return slice.iter().map(|x| vec![*x]).collect::<Vec<Vec<T>>>();
} | conditional_block |
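// A quick trace of the recursion in comb above, for illustration:
// comb(&[1, 2, 3], 2) prepends 1 to every element of comb(&[2, 3], 1), giving
// [1, 2] and [1, 3], then extends with comb(&[2, 3], 2), which hits the
// k == slice.len() case and yields [2, 3].
// assert_eq!(comb(&[1, 2, 3], 2), vec![vec![1, 2], vec![1, 3], vec![2, 3]]);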
os_str.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The underlying OsString/OsStr implementation on Unix systems: just
/// a `Vec<u8>`/`[u8]`.
use core::prelude::*;
use fmt::{self, Debug};
use vec::Vec;
use slice::SliceExt as StdSliceExt;
use str;
use string::{String, CowString};
use mem;
#[derive(Clone)]
pub struct Buf {
pub inner: Vec<u8>
}
pub struct Slice {
pub inner: [u8]
}
impl Debug for Slice {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.to_string_lossy().fmt(formatter)
}
}
impl Debug for Buf {
fn | (&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.as_slice().fmt(formatter)
}
}
impl Buf {
pub fn from_string(s: String) -> Buf {
Buf { inner: s.into_bytes() }
}
pub fn from_str(s: &str) -> Buf {
Buf { inner: s.as_bytes().to_vec() }
}
pub fn as_slice(&self) -> &Slice {
unsafe { mem::transmute(self.inner.as_slice()) }
}
pub fn into_string(self) -> Result<String, Buf> {
String::from_utf8(self.inner).map_err(|p| Buf { inner: p.into_bytes() } )
}
pub fn push_slice(&mut self, s: &Slice) {
self.inner.push_all(&s.inner)
}
}
impl Slice {
fn from_u8_slice(s: &[u8]) -> &Slice {
unsafe { mem::transmute(s) }
}
pub fn from_str(s: &str) -> &Slice {
unsafe { mem::transmute(s.as_bytes()) }
}
pub fn to_str(&self) -> Option<&str> {
str::from_utf8(&self.inner).ok()
}
pub fn to_string_lossy(&self) -> CowString {
String::from_utf8_lossy(&self.inner)
}
pub fn to_owned(&self) -> Buf {
Buf { inner: self.inner.to_vec() }
}
}
| fmt | identifier_name |
os_str.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The underlying OsString/OsStr implementation on Unix systems: just
/// a `Vec<u8>`/`[u8]`.
use core::prelude::*;
use fmt::{self, Debug};
use vec::Vec;
use slice::SliceExt as StdSliceExt;
use str;
use string::{String, CowString};
use mem;
#[derive(Clone)]
pub struct Buf {
pub inner: Vec<u8>
}
pub struct Slice {
pub inner: [u8]
}
impl Debug for Slice {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.to_string_lossy().fmt(formatter)
}
}
impl Debug for Buf {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.as_slice().fmt(formatter)
}
}
impl Buf {
pub fn from_string(s: String) -> Buf {
Buf { inner: s.into_bytes() }
}
pub fn from_str(s: &str) -> Buf {
Buf { inner: s.as_bytes().to_vec() }
}
pub fn as_slice(&self) -> &Slice {
unsafe { mem::transmute(self.inner.as_slice()) }
}
pub fn into_string(self) -> Result<String, Buf> {
String::from_utf8(self.inner).map_err(|p| Buf { inner: p.into_bytes() } )
}
pub fn push_slice(&mut self, s: &Slice) {
self.inner.push_all(&s.inner)
}
}
impl Slice {
fn from_u8_slice(s: &[u8]) -> &Slice {
unsafe { mem::transmute(s) }
}
pub fn from_str(s: &str) -> &Slice {
unsafe { mem::transmute(s.as_bytes()) }
}
pub fn to_str(&self) -> Option<&str> {
str::from_utf8(&self.inner).ok()
}
pub fn to_string_lossy(&self) -> CowString {
String::from_utf8_lossy(&self.inner)
}
pub fn to_owned(&self) -> Buf |
}
| {
Buf { inner: self.inner.to_vec() }
} | identifier_body |
os_str.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The underlying OsString/OsStr implementation on Unix systems: just
/// a `Vec<u8>`/`[u8]`.
use core::prelude::*;
use fmt::{self, Debug};
use vec::Vec;
use slice::SliceExt as StdSliceExt;
use str;
use string::{String, CowString};
use mem;
#[derive(Clone)]
pub struct Buf {
pub inner: Vec<u8>
}
pub struct Slice {
pub inner: [u8]
}
impl Debug for Slice {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.to_string_lossy().fmt(formatter)
}
}
impl Debug for Buf {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.as_slice().fmt(formatter)
}
}
impl Buf {
pub fn from_string(s: String) -> Buf {
Buf { inner: s.into_bytes() }
}
pub fn from_str(s: &str) -> Buf {
Buf { inner: s.as_bytes().to_vec() }
}
pub fn as_slice(&self) -> &Slice {
unsafe { mem::transmute(self.inner.as_slice()) }
}
pub fn into_string(self) -> Result<String, Buf> {
String::from_utf8(self.inner).map_err(|p| Buf { inner: p.into_bytes() } )
}
pub fn push_slice(&mut self, s: &Slice) {
self.inner.push_all(&s.inner)
}
}
impl Slice {
fn from_u8_slice(s: &[u8]) -> &Slice {
unsafe { mem::transmute(s) }
}
pub fn from_str(s: &str) -> &Slice {
unsafe { mem::transmute(s.as_bytes()) }
}
pub fn to_str(&self) -> Option<&str> { | String::from_utf8_lossy(&self.inner)
}
pub fn to_owned(&self) -> Buf {
Buf { inner: self.inner.to_vec() }
}
} | str::from_utf8(&self.inner).ok()
}
pub fn to_string_lossy(&self) -> CowString { | random_line_split |
nbody.rs | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// contributed by TeXitoi
const PI: f64 = 3.141592653589793;
const SOLAR_MASS: f64 = 4.0 * PI * PI;
const YEAR: f64 = 365.24;
const N_BODIES: usize = 5;
static BODIES: [Planet;N_BODIES] = [
// Sun
Planet {
x: 0.0, y: 0.0, z: 0.0,
vx: 0.0, vy: 0.0, vz: 0.0,
mass: SOLAR_MASS,
},
// Jupiter
Planet {
x: 4.84143144246472090e+00,
y: -1.16032004402742839e+00,
z: -1.03622044471123109e-01,
vx: 1.66007664274403694e-03 * YEAR,
vy: 7.69901118419740425e-03 * YEAR,
vz: -6.90460016972063023e-05 * YEAR,
mass: 9.54791938424326609e-04 * SOLAR_MASS,
},
// Saturn
Planet {
x: 8.34336671824457987e+00,
y: 4.12479856412430479e+00,
z: -4.03523417114321381e-01,
vx: -2.76742510726862411e-03 * YEAR,
vy: 4.99852801234917238e-03 * YEAR,
vz: 2.30417297573763929e-05 * YEAR,
mass: 2.85885980666130812e-04 * SOLAR_MASS,
},
// Uranus
Planet {
x: 1.28943695621391310e+01,
y: -1.51111514016986312e+01,
z: -2.23307578892655734e-01,
vx: 2.96460137564761618e-03 * YEAR,
vy: 2.37847173959480950e-03 * YEAR,
vz: -2.96589568540237556e-05 * YEAR,
mass: 4.36624404335156298e-05 * SOLAR_MASS,
},
// Neptune
Planet {
x: 1.53796971148509165e+01,
y: -2.59193146099879641e+01,
z: 1.79258772950371181e-01,
vx: 2.68067772490389322e-03 * YEAR,
vy: 1.62824170038242295e-03 * YEAR,
vz: -9.51592254519715870e-05 * YEAR,
mass: 5.15138902046611451e-05 * SOLAR_MASS,
},
];
#[derive(Clone, Copy)]
struct | {
x: f64, y: f64, z: f64,
vx: f64, vy: f64, vz: f64,
mass: f64,
}
fn advance(bodies: &mut [Planet;N_BODIES], dt: f64, steps: i32) {
for _ in (0..steps) {
let mut b_slice: &mut [_] = bodies;
loop {
let bi = match shift_mut_ref(&mut b_slice) {
Some(bi) => bi,
None => break
};
for bj in b_slice.iter_mut() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let d2 = dx * dx + dy * dy + dz * dz;
let mag = dt / (d2 * d2.sqrt());
let massj_mag = bj.mass * mag;
bi.vx -= dx * massj_mag;
bi.vy -= dy * massj_mag;
bi.vz -= dz * massj_mag;
let massi_mag = bi.mass * mag;
bj.vx += dx * massi_mag;
bj.vy += dy * massi_mag;
bj.vz += dz * massi_mag;
}
bi.x += dt * bi.vx;
bi.y += dt * bi.vy;
bi.z += dt * bi.vz;
}
}
}
fn energy(bodies: &[Planet;N_BODIES]) -> f64 {
let mut e = 0.0;
let mut bodies = bodies.iter();
loop {
let bi = match bodies.next() {
Some(bi) => bi,
None => break
};
e += (bi.vx * bi.vx + bi.vy * bi.vy + bi.vz * bi.vz) * bi.mass / 2.0;
for bj in bodies.clone() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let dist = (dx * dx + dy * dy + dz * dz).sqrt();
e -= bi.mass * bj.mass / dist;
}
}
e
}
fn offset_momentum(bodies: &mut [Planet;N_BODIES]) {
let mut px = 0.0;
let mut py = 0.0;
let mut pz = 0.0;
for bi in bodies.iter() {
px += bi.vx * bi.mass;
py += bi.vy * bi.mass;
pz += bi.vz * bi.mass;
}
let sun = &mut bodies[0];
sun.vx = - px / SOLAR_MASS;
sun.vy = - py / SOLAR_MASS;
sun.vz = - pz / SOLAR_MASS;
}
fn main() {
let n = std::env::args_os().nth(1)
.and_then(|s| s.into_string().ok())
.and_then(|n| n.parse().ok())
.unwrap_or(1000);
let mut bodies = BODIES;
offset_momentum(&mut bodies);
println!("{:.9}", energy(&bodies));
advance(&mut bodies, 0.01, n);
println!("{:.9}", energy(&bodies));
}
/// Pop a mutable reference off the head of a slice, mutating the slice to no
/// longer contain the mutable reference.
fn shift_mut_ref<'a, T>(r: &mut &'a mut [T]) -> Option<&'a mut T> {
if r.len() == 0 { return None }
let tmp = std::mem::replace(r, &mut []);
let (h, t) = tmp.split_at_mut(1);
*r = t;
Some(&mut h[0])
}
| Planet | identifier_name |
nbody.rs | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// contributed by TeXitoi
const PI: f64 = 3.141592653589793;
const SOLAR_MASS: f64 = 4.0 * PI * PI;
const YEAR: f64 = 365.24;
const N_BODIES: usize = 5;
static BODIES: [Planet;N_BODIES] = [
// Sun
Planet {
x: 0.0, y: 0.0, z: 0.0,
vx: 0.0, vy: 0.0, vz: 0.0,
mass: SOLAR_MASS,
},
// Jupiter
Planet {
x: 4.84143144246472090e+00,
y: -1.16032004402742839e+00,
z: -1.03622044471123109e-01,
vx: 1.66007664274403694e-03 * YEAR,
vy: 7.69901118419740425e-03 * YEAR,
vz: -6.90460016972063023e-05 * YEAR,
mass: 9.54791938424326609e-04 * SOLAR_MASS,
},
// Saturn
Planet {
x: 8.34336671824457987e+00,
y: 4.12479856412430479e+00,
z: -4.03523417114321381e-01,
vx: -2.76742510726862411e-03 * YEAR,
vy: 4.99852801234917238e-03 * YEAR,
vz: 2.30417297573763929e-05 * YEAR,
mass: 2.85885980666130812e-04 * SOLAR_MASS,
},
// Uranus
Planet {
x: 1.28943695621391310e+01,
y: -1.51111514016986312e+01,
z: -2.23307578892655734e-01,
vx: 2.96460137564761618e-03 * YEAR,
vy: 2.37847173959480950e-03 * YEAR,
vz: -2.96589568540237556e-05 * YEAR,
mass: 4.36624404335156298e-05 * SOLAR_MASS,
},
// Neptune
Planet {
x: 1.53796971148509165e+01,
y: -2.59193146099879641e+01,
z: 1.79258772950371181e-01,
vx: 2.68067772490389322e-03 * YEAR,
vy: 1.62824170038242295e-03 * YEAR,
vz: -9.51592254519715870e-05 * YEAR,
mass: 5.15138902046611451e-05 * SOLAR_MASS,
},
];
#[derive(Clone, Copy)]
struct Planet {
x: f64, y: f64, z: f64,
vx: f64, vy: f64, vz: f64,
mass: f64,
}
fn advance(bodies: &mut [Planet;N_BODIES], dt: f64, steps: i32) { | for _ in (0..steps) {
let mut b_slice: &mut [_] = bodies;
loop {
let bi = match shift_mut_ref(&mut b_slice) {
Some(bi) => bi,
None => break
};
for bj in b_slice.iter_mut() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let d2 = dx * dx + dy * dy + dz * dz;
let mag = dt / (d2 * d2.sqrt());
let massj_mag = bj.mass * mag;
bi.vx -= dx * massj_mag;
bi.vy -= dy * massj_mag;
bi.vz -= dz * massj_mag;
let massi_mag = bi.mass * mag;
bj.vx += dx * massi_mag;
bj.vy += dy * massi_mag;
bj.vz += dz * massi_mag;
}
bi.x += dt * bi.vx;
bi.y += dt * bi.vy;
bi.z += dt * bi.vz;
}
}
}
fn energy(bodies: &[Planet;N_BODIES]) -> f64 {
let mut e = 0.0;
let mut bodies = bodies.iter();
loop {
let bi = match bodies.next() {
Some(bi) => bi,
None => break
};
e += (bi.vx * bi.vx + bi.vy * bi.vy + bi.vz * bi.vz) * bi.mass / 2.0;
for bj in bodies.clone() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let dist = (dx * dx + dy * dy + dz * dz).sqrt();
e -= bi.mass * bj.mass / dist;
}
}
e
}
fn offset_momentum(bodies: &mut [Planet;N_BODIES]) {
let mut px = 0.0;
let mut py = 0.0;
let mut pz = 0.0;
for bi in bodies.iter() {
px += bi.vx * bi.mass;
py += bi.vy * bi.mass;
pz += bi.vz * bi.mass;
}
let sun = &mut bodies[0];
sun.vx = - px / SOLAR_MASS;
sun.vy = - py / SOLAR_MASS;
sun.vz = - pz / SOLAR_MASS;
}
fn main() {
let n = std::env::args_os().nth(1)
.and_then(|s| s.into_string().ok())
.and_then(|n| n.parse().ok())
.unwrap_or(1000);
let mut bodies = BODIES;
offset_momentum(&mut bodies);
println!("{:.9}", energy(&bodies));
advance(&mut bodies, 0.01, n);
println!("{:.9}", energy(&bodies));
}
/// Pop a mutable reference off the head of a slice, mutating the slice to no
/// longer contain the mutable reference.
fn shift_mut_ref<'a, T>(r: &mut &'a mut [T]) -> Option<&'a mut T> {
if r.len() == 0 { return None }
let tmp = std::mem::replace(r, &mut []);
let (h, t) = tmp.split_at_mut(1);
*r = t;
Some(&mut h[0])
} | random_line_split |
|
nbody.rs | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// contributed by TeXitoi
const PI: f64 = 3.141592653589793;
const SOLAR_MASS: f64 = 4.0 * PI * PI;
const YEAR: f64 = 365.24;
const N_BODIES: usize = 5;
static BODIES: [Planet;N_BODIES] = [
// Sun
Planet {
x: 0.0, y: 0.0, z: 0.0,
vx: 0.0, vy: 0.0, vz: 0.0,
mass: SOLAR_MASS,
},
// Jupiter
Planet {
x: 4.84143144246472090e+00,
y: -1.16032004402742839e+00,
z: -1.03622044471123109e-01,
vx: 1.66007664274403694e-03 * YEAR,
vy: 7.69901118419740425e-03 * YEAR,
vz: -6.90460016972063023e-05 * YEAR,
mass: 9.54791938424326609e-04 * SOLAR_MASS,
},
// Saturn
Planet {
x: 8.34336671824457987e+00,
y: 4.12479856412430479e+00,
z: -4.03523417114321381e-01,
vx: -2.76742510726862411e-03 * YEAR,
vy: 4.99852801234917238e-03 * YEAR,
vz: 2.30417297573763929e-05 * YEAR,
mass: 2.85885980666130812e-04 * SOLAR_MASS,
},
// Uranus
Planet {
x: 1.28943695621391310e+01,
y: -1.51111514016986312e+01,
z: -2.23307578892655734e-01,
vx: 2.96460137564761618e-03 * YEAR,
vy: 2.37847173959480950e-03 * YEAR,
vz: -2.96589568540237556e-05 * YEAR,
mass: 4.36624404335156298e-05 * SOLAR_MASS,
},
// Neptune
Planet {
x: 1.53796971148509165e+01,
y: -2.59193146099879641e+01,
z: 1.79258772950371181e-01,
vx: 2.68067772490389322e-03 * YEAR,
vy: 1.62824170038242295e-03 * YEAR,
vz: -9.51592254519715870e-05 * YEAR,
mass: 5.15138902046611451e-05 * SOLAR_MASS,
},
];
#[derive(Clone, Copy)]
struct Planet {
x: f64, y: f64, z: f64,
vx: f64, vy: f64, vz: f64,
mass: f64,
}
fn advance(bodies: &mut [Planet;N_BODIES], dt: f64, steps: i32) {
for _ in (0..steps) {
let mut b_slice: &mut [_] = bodies;
loop {
let bi = match shift_mut_ref(&mut b_slice) {
Some(bi) => bi,
None => break
};
for bj in b_slice.iter_mut() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let d2 = dx * dx + dy * dy + dz * dz;
let mag = dt / (d2 * d2.sqrt());
let massj_mag = bj.mass * mag;
bi.vx -= dx * massj_mag;
bi.vy -= dy * massj_mag;
bi.vz -= dz * massj_mag;
let massi_mag = bi.mass * mag;
bj.vx += dx * massi_mag;
bj.vy += dy * massi_mag;
bj.vz += dz * massi_mag;
}
bi.x += dt * bi.vx;
bi.y += dt * bi.vy;
bi.z += dt * bi.vz;
}
}
}
fn energy(bodies: &[Planet;N_BODIES]) -> f64 |
fn offset_momentum(bodies: &mut [Planet;N_BODIES]) {
let mut px = 0.0;
let mut py = 0.0;
let mut pz = 0.0;
for bi in bodies.iter() {
px += bi.vx * bi.mass;
py += bi.vy * bi.mass;
pz += bi.vz * bi.mass;
}
let sun = &mut bodies[0];
sun.vx = - px / SOLAR_MASS;
sun.vy = - py / SOLAR_MASS;
sun.vz = - pz / SOLAR_MASS;
}
fn main() {
let n = std::env::args_os().nth(1)
.and_then(|s| s.into_string().ok())
.and_then(|n| n.parse().ok())
.unwrap_or(1000);
let mut bodies = BODIES;
offset_momentum(&mut bodies);
println!("{:.9}", energy(&bodies));
advance(&mut bodies, 0.01, n);
println!("{:.9}", energy(&bodies));
}
/// Pop a mutable reference off the head of a slice, mutating the slice to no
/// longer contain the mutable reference.
fn shift_mut_ref<'a, T>(r: &mut &'a mut [T]) -> Option<&'a mut T> {
if r.len() == 0 { return None }
let tmp = std::mem::replace(r, &mut []);
let (h, t) = tmp.split_at_mut(1);
*r = t;
Some(&mut h[0])
}
| {
let mut e = 0.0;
let mut bodies = bodies.iter();
loop {
let bi = match bodies.next() {
Some(bi) => bi,
None => break
};
e += (bi.vx * bi.vx + bi.vy * bi.vy + bi.vz * bi.vz) * bi.mass / 2.0;
for bj in bodies.clone() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let dist = (dx * dx + dy * dy + dz * dz).sqrt();
e -= bi.mass * bj.mass / dist;
}
}
e
} | identifier_body |
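// The shift_mut_ref helper above walks pairwise interactions over a mutable slice.
// On current Rust the same loop can be written with the standard split_first_mut;
// a generic sketch, illustrative only and not part of the benchmark source:
fn for_each_pair<T>(mut rest: &mut [T], mut f: impl FnMut(&mut T, &mut T)) {
    while let Some((first, tail)) = std::mem::take(&mut rest).split_first_mut() {
        for other in tail.iter_mut() {
            f(&mut *first, other); // e.g. the velocity update from advance()
        }
        rest = tail;
    }
}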
managed.rs | use gfx::{Encoder, Resources, CommandBuffer, Slice, IndexBuffer};
use gfx::memory::{Usage, TRANSFER_DST};
use gfx::handle::Buffer;
use gfx::traits::{Factory, FactoryExt};
use gfx::buffer::Role;
use ui::render::Vertex;
// step: 128 vertices (4096 bytes, 42 triangles + 2 extra vertices)
const ALLOC_STEP: usize = 128;
#[derive(Debug)]
struct Zone {
start: usize,
size: usize
}
pub struct ManagedBuffer<R> where R: Resources {
local: Vec<Vertex>,
remote: Buffer<R, Vertex>,
zones: Vec<(Zone, bool)>,
tail: usize
}
impl<R> ManagedBuffer<R> where R: Resources {
pub fn new<F>(factory: &mut F) -> Self where F: Factory<R> {
ManagedBuffer {
local: Vec::new(),
remote: factory.create_buffer(ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap(),
zones: Vec::new(),
tail: 0
}
}
pub fn new_zone(&mut self) -> usize {
self.zones.push((Zone {start: self.tail, size: 0}, true));
self.zones.len() - 1
}
pub fn replace_zone(&mut self, buffer: &[Vertex], zone: usize) {
let (ref mut zone, ref mut dirty) = self.zones[zone];
*dirty = true;
if zone.size == buffer.len() {
let slice = &mut self.local[zone.start..zone.start + zone.size];
slice.copy_from_slice(buffer);
} else {
// TODO: Shift later elements forward or backwards.
unimplemented!()
}
}
fn get_zone(&self, index: usize) -> &[Vertex] {
let zone = &self.zones[index].0;
&self.local[zone.start..zone.start+zone.size]
}
// TODO: Handle errors.
pub fn update<F, C>(&mut self, factory: &mut F, encoder: &mut Encoder<R, C>) where F: Factory<R> + FactoryExt<R>, C: CommandBuffer<R> {
//println!("Begin update");
if self.local.len() > self.remote.len() {
// Full update
let (pages, other) = (self.local.len() / ALLOC_STEP, self.local.len() % ALLOC_STEP);
let pages = pages + if other!= 0 {1} else {0};
//println!("Full update {} -> {}", self.remote.len(), pages * ALLOC_STEP);
self.remote = factory.create_buffer(pages * ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap();
encoder.update_buffer(&self.remote, &self.local[..self.tail], 0).unwrap();
} else {
// Partial update
for &mut (ref zone, ref mut dirty) in self.zones.iter_mut().filter(|&&mut (_, dirty)| dirty) {
// TODO: Performance: Roll adjacent updates into a single update.
//println!("Update partial: {:?}", zone);
encoder.update_buffer(&self.remote, &self.local[zone.start..zone.start+zone.size], zone.start).unwrap();
*dirty = false
}
}
//println!("End update");
}
pub fn remote(&self) -> &Buffer<R, Vertex> |
pub fn slice(&self) -> Slice<R> {
Slice {
start: 0,
end: self.tail as u32,
base_vertex: 0,
instances: None,
buffer: IndexBuffer::Auto
}
}
}
impl<R> Extend<Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else {
panic!("Tried to extend to a previously created zone, but there are no zones.");
}
}
}
impl<'a, R> Extend<&'a Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=&'a Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else {
panic!("Tried to extend to a previously created zone, but there are no zones.");
}
}
} | {
&self.remote
} | identifier_body |
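// Illustrative call sequence for the zone API in managed.rs above; the names
// `factory`, `encoder`, `vertices` and `updated_vertices` are assumed context,
// not defined in the module:
// let mut buf = ManagedBuffer::new(&mut factory);
// let zone = buf.new_zone();                 // open a zone at the current tail
// buf.extend(vertices.iter());               // append vertices into that zone
// buf.replace_zone(&updated_vertices, zone); // later: overwrite it (same length only, for now)
// buf.update(&mut factory, &mut encoder);    // upload dirty zones, or regrow the remote buffer
// // then draw with buf.remote() and buf.slice()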
managed.rs | use gfx::{Encoder, Resources, CommandBuffer, Slice, IndexBuffer};
use gfx::memory::{Usage, TRANSFER_DST};
use gfx::handle::Buffer;
use gfx::traits::{Factory, FactoryExt};
use gfx::buffer::Role;
use ui::render::Vertex;
// step: 128 vertices (4096 bytes, 42 triangles + 2 extra vertices)
const ALLOC_STEP: usize = 128;
#[derive(Debug)]
struct | {
start: usize,
size: usize
}
pub struct ManagedBuffer<R> where R: Resources {
local: Vec<Vertex>,
remote: Buffer<R, Vertex>,
zones: Vec<(Zone, bool)>,
tail: usize
}
impl<R> ManagedBuffer<R> where R: Resources {
pub fn new<F>(factory: &mut F) -> Self where F: Factory<R> {
ManagedBuffer {
local: Vec::new(),
remote: factory.create_buffer(ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap(),
zones: Vec::new(),
tail: 0
}
}
pub fn new_zone(&mut self) -> usize {
self.zones.push((Zone {start: self.tail, size: 0}, true));
self.zones.len() - 1
}
pub fn replace_zone(&mut self, buffer: &[Vertex], zone: usize) {
let (ref mut zone, ref mut dirty) = self.zones[zone];
*dirty = true;
if zone.size == buffer.len() {
let slice = &mut self.local[zone.start..zone.start + zone.size];
slice.copy_from_slice(buffer);
} else {
// TODO: Shift later elements forward or backwards.
unimplemented!()
}
}
fn get_zone(&self, index: usize) -> &[Vertex] {
let zone = &self.zones[index].0;
&self.local[zone.start..zone.start+zone.size]
}
// TODO: Handle errors.
pub fn update<F, C>(&mut self, factory: &mut F, encoder: &mut Encoder<R, C>) where F: Factory<R> + FactoryExt<R>, C: CommandBuffer<R> {
//println!("Begin update");
if self.local.len() > self.remote.len() {
// Full update
let (pages, other) = (self.local.len() / ALLOC_STEP, self.local.len() % ALLOC_STEP);
let pages = pages + if other!= 0 {1} else {0};
//println!("Full update {} -> {}", self.remote.len(), pages * ALLOC_STEP);
self.remote = factory.create_buffer(pages * ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap();
encoder.update_buffer(&self.remote, &self.local[..self.tail], 0).unwrap();
} else {
// Partial update
for &mut (ref zone, ref mut dirty) in self.zones.iter_mut().filter(|&&mut (_, dirty)| dirty) {
// TODO: Performance: Roll adjacent updates into a single update.
//println!("Update partial: {:?}", zone);
encoder.update_buffer(&self.remote, &self.local[zone.start..zone.start+zone.size], zone.start).unwrap();
*dirty = false
}
}
//println!("End update");
}
pub fn remote(&self) -> &Buffer<R, Vertex> {
&self.remote
}
pub fn slice(&self) -> Slice<R> {
Slice {
start: 0,
end: self.tail as u32,
base_vertex: 0,
instances: None,
buffer: IndexBuffer::Auto
}
}
}
impl<R> Extend<Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else {
panic!("Tried to extend to a previously created zone, but there are no zones.");
}
}
}
impl<'a, R> Extend<&'a Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=&'a Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else {
panic!("Tried to extend to a previously created zone, but there are no zones.");
}
}
} | Zone | identifier_name |
managed.rs | use gfx::{Encoder, Resources, CommandBuffer, Slice, IndexBuffer};
use gfx::memory::{Usage, TRANSFER_DST};
use gfx::handle::Buffer;
use gfx::traits::{Factory, FactoryExt};
use gfx::buffer::Role;
use ui::render::Vertex;
// step: 128 vertices (4096 bytes, 42 triangles + 2 extra vertices)
const ALLOC_STEP: usize = 128;
#[derive(Debug)]
struct Zone {
start: usize,
size: usize
}
pub struct ManagedBuffer<R> where R: Resources {
local: Vec<Vertex>,
remote: Buffer<R, Vertex>,
zones: Vec<(Zone, bool)>,
tail: usize
}
impl<R> ManagedBuffer<R> where R: Resources {
pub fn new<F>(factory: &mut F) -> Self where F: Factory<R> {
ManagedBuffer {
local: Vec::new(),
remote: factory.create_buffer(ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap(),
zones: Vec::new(),
tail: 0
}
}
pub fn new_zone(&mut self) -> usize {
self.zones.push((Zone {start: self.tail, size: 0}, true));
self.zones.len() - 1
}
pub fn replace_zone(&mut self, buffer: &[Vertex], zone: usize) {
let (ref mut zone, ref mut dirty) = self.zones[zone];
*dirty = true;
if zone.size == buffer.len() {
let slice = &mut self.local[zone.start..zone.start + zone.size];
slice.copy_from_slice(buffer);
} else {
// TODO: Shift later elements forward or backwards.
unimplemented!()
}
}
fn get_zone(&self, index: usize) -> &[Vertex] {
let zone = &self.zones[index].0;
&self.local[zone.start..zone.start+zone.size]
}
// TODO: Handle errors.
pub fn update<F, C>(&mut self, factory: &mut F, encoder: &mut Encoder<R, C>) where F: Factory<R> + FactoryExt<R>, C: CommandBuffer<R> {
//println!("Begin update");
if self.local.len() > self.remote.len() {
// Full update
let (pages, other) = (self.local.len() / ALLOC_STEP, self.local.len() % ALLOC_STEP);
let pages = pages + if other!= 0 {1} else {0};
//println!("Full update {} -> {}", self.remote.len(), pages * ALLOC_STEP);
self.remote = factory.create_buffer(pages * ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap();
encoder.update_buffer(&self.remote, &self.local[..self.tail], 0).unwrap();
} else {
// Partial update
for &mut (ref zone, ref mut dirty) in self.zones.iter_mut().filter(|&&mut (_, dirty)| dirty) {
// TODO: Performance: Roll adjacent updates into a single update.
//println!("Update partial: {:?}", zone);
encoder.update_buffer(&self.remote, &self.local[zone.start..zone.start+zone.size], zone.start).unwrap();
*dirty = false
}
}
//println!("End update");
}
pub fn remote(&self) -> &Buffer<R, Vertex> {
&self.remote
}
pub fn slice(&self) -> Slice<R> {
Slice {
start: 0,
end: self.tail as u32,
base_vertex: 0,
instances: None,
buffer: IndexBuffer::Auto
}
}
}
impl<R> Extend<Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else {
panic!("Tried to extend to a previously created zone, but there are no zones.");
}
}
}
impl<'a, R> Extend<&'a Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=&'a Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else |
}
} | {
panic!("Tried to extend to a previously created zone, but there are no zones.");
} | conditional_block |
managed.rs | use gfx::{Encoder, Resources, CommandBuffer, Slice, IndexBuffer};
use gfx::memory::{Usage, TRANSFER_DST};
use gfx::handle::Buffer;
use gfx::traits::{Factory, FactoryExt};
use gfx::buffer::Role;
use ui::render::Vertex;
// step: 128 vertices (4096 bytes, 42 triangles + 2 extra vertices)
const ALLOC_STEP: usize = 128;
#[derive(Debug)]
struct Zone {
start: usize,
size: usize
}
pub struct ManagedBuffer<R> where R: Resources {
local: Vec<Vertex>,
remote: Buffer<R, Vertex>,
zones: Vec<(Zone, bool)>,
tail: usize
}
impl<R> ManagedBuffer<R> where R: Resources {
pub fn new<F>(factory: &mut F) -> Self where F: Factory<R> {
ManagedBuffer {
local: Vec::new(),
remote: factory.create_buffer(ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap(),
zones: Vec::new(),
tail: 0
}
}
pub fn new_zone(&mut self) -> usize {
self.zones.push((Zone {start: self.tail, size: 0}, true));
self.zones.len() - 1
}
pub fn replace_zone(&mut self, buffer: &[Vertex], zone: usize) { | *dirty = true;
if zone.size == buffer.len() {
let slice = &mut self.local[zone.start..zone.start + zone.size];
slice.copy_from_slice(buffer);
} else {
// TODO: Shift later elements forward or backwards.
unimplemented!()
}
}
fn get_zone(&self, index: usize) -> &[Vertex] {
let zone = &self.zones[index].0;
&self.local[zone.start..zone.start+zone.size]
}
// TODO: Handle errors.
pub fn update<F, C>(&mut self, factory: &mut F, encoder: &mut Encoder<R, C>) where F: Factory<R> + FactoryExt<R>, C: CommandBuffer<R> {
//println!("Begin update");
if self.local.len() > self.remote.len() {
// Full update
let (pages, other) = (self.local.len() / ALLOC_STEP, self.local.len() % ALLOC_STEP);
let pages = pages + if other!= 0 {1} else {0};
//println!("Full update {} -> {}", self.remote.len(), pages * ALLOC_STEP);
self.remote = factory.create_buffer(pages * ALLOC_STEP, Role::Vertex, Usage::Dynamic, TRANSFER_DST).unwrap();
encoder.update_buffer(&self.remote, &self.local[..self.tail], 0).unwrap();
} else {
// Partial update
for &mut (ref zone, ref mut dirty) in self.zones.iter_mut().filter(|&&mut (_, dirty)| dirty) {
// TODO: Performance: Roll adjacent updates into a single update.
//println!("Update partial: {:?}", zone);
encoder.update_buffer(&self.remote, &self.local[zone.start..zone.start+zone.size], zone.start).unwrap();
*dirty = false
}
}
//println!("End update");
}
pub fn remote(&self) -> &Buffer<R, Vertex> {
&self.remote
}
pub fn slice(&self) -> Slice<R> {
Slice {
start: 0,
end: self.tail as u32,
base_vertex: 0,
instances: None,
buffer: IndexBuffer::Auto
}
}
}
impl<R> Extend<Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else {
panic!("Tried to extend to a previously created zone, but there are no zones.");
}
}
}
impl<'a, R> Extend<&'a Vertex> for ManagedBuffer<R> where R: Resources {
fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item=&'a Vertex> {
if let Some(zone) = self.zones.last_mut() {
let old_len = self.local.len();
self.local.extend(iter);
let len = self.local.len() - old_len;
zone.0.size += len;
zone.1 = true;
self.tail += len;
} else {
panic!("Tried to extend to a previously created zone, but there are no zones.");
}
}
} | let (ref mut zone, ref mut dirty) = self.zones[zone]; | random_line_split |
typeck_type_placeholder_mismatch.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test checks that genuine type errors with partial
// type hints are understandable.
struct Foo<T>;
struct Bar<U>;
pub fn main() { | fn test1() {
let x: Foo<_> = Bar::<uint>;
//~^ ERROR mismatched types: expected `Foo<<generic #0>>` but found `Bar<uint>`
let y: Foo<uint> = x;
}
fn test2() {
let x: Foo<_> = Bar::<uint>;
//~^ ERROR mismatched types: expected `Foo<<generic #0>>` but found `Bar<uint>`
} | }
| random_line_split |
typeck_type_placeholder_mismatch.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test checks that genuine type errors with partial
// type hints are understandable.
struct Foo<T>;
struct Bar<U>;
pub fn main() |
fn test1() {
let x: Foo<_> = Bar::<uint>;
//~^ ERROR mismatched types: expected `Foo<<generic #0>>` but found `Bar<uint>`
let y: Foo<uint> = x;
}
fn test2() {
let x: Foo<_> = Bar::<uint>;
//~^ ERROR mismatched types: expected `Foo<<generic #0>>` but found `Bar<uint>`
}
| {
} | identifier_body |
typeck_type_placeholder_mismatch.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test checks that genuine type errors with partial
// type hints are understandable.
struct | <T>;
struct Bar<U>;
pub fn main() {
}
fn test1() {
let x: Foo<_> = Bar::<uint>;
//~^ ERROR mismatched types: expected `Foo<<generic #0>>` but found `Bar<uint>`
let y: Foo<uint> = x;
}
fn test2() {
let x: Foo<_> = Bar::<uint>;
//~^ ERROR mismatched types: expected `Foo<<generic #0>>` but found `Bar<uint>`
}
| Foo | identifier_name |
lzss2.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use huff::adaptive as nested;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: nested::Writer<W>,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W>{
Writer {
inner: nested::Writer::new(inner),
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurrences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos!= search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)]!= self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn process(&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs!= 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner.into_inner()
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<nested::Reader<R>>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn new(inner: R) -> Reader<R> {
Reader {
inner: nested::Reader::new(inner).bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) |
/// Process a group of 8 literals or match/length pairs. The
/// given token contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof {
Ok(0)
} else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[0]);
}
#[test]
fn compress_a() {
cmp_test(b"a", &[192, 12, 40]);
}
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[192, 12, 35, 6, 2, 64]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[0], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[192, 12, 40], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[192, 12, 35, 6, 2, 64], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss2.rs");
roundtrip(input);
}
}
| {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
} | identifier_body |
lzss2.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use huff::adaptive as nested;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: nested::Writer<W>,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W> {
Writer {
inner: nested::Writer::new(inner),
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurrences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn process(&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs != 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
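// Record where data hashing to this bucket was last seen; stale entries are
// re-validated by find_longest_match before they are used.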
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner.into_inner()
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
} | fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<nested::Reader<R>>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn new(inner: R) -> Reader<R> {
Reader {
inner: nested::Reader::new(inner).bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
}
/// Process a group of 8 literals or match/length pairs. The
/// given token contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
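// The offset counts bytes back from the current write position, so the match
// starts at (position - ofs) modulo WINDOW_SIZE in the circular window.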
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof {
Ok(0)
} else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[0]);
}
#[test]
fn compress_a() {
cmp_test(b"a", &[192, 12, 40]);
}
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[192, 12, 35, 6, 2, 64]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[0], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[192, 12, 40], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[192, 12, 35, 6, 2, 64], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss2.rs");
roundtrip(input);
}
} | random_line_split |
|
lzss2.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use huff::adaptive as nested;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: nested::Writer<W>,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W> {
Writer {
inner: nested::Writer::new(inner),
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurrences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn process(&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
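// Distance from the remembered candidate position back to the current
// position, measured on the circular window.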
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs != 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner.into_inner()
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<nested::Reader<R>>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn | (inner: R) -> Reader<R> {
Reader {
inner: nested::Reader::new(inner).bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
}
/// Process a group of 8 literals or match/length pairs. The
/// given token contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
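// `>>` binds tighter than `&`, so this tests bit (7 - i) of the flag byte.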
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof {
Ok(0)
} else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[0]);
}
#[test]
fn compress_a() {
cmp_test(b"a", &[192, 12, 40]);
}
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[192, 12, 35, 6, 2, 64]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[0], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[192, 12, 40], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[192, 12, 35, 6, 2, 64], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss2.rs");
roundtrip(input);
}
}
| new | identifier_name |
lzss2.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use huff::adaptive as nested;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: nested::Writer<W>,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W> {
Writer {
inner: nested::Writer::new(inner),
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
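// The final group may hold fewer than 8 entries; left-align the flag bits
// so the reader can still scan them from the most significant bit down.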
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 |
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurrences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn process(&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs != 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner.into_inner()
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<nested::Reader<R>>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn new(inner: R) -> Reader<R> {
Reader {
inner: nested::Reader::new(inner).bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
}
/// Process a group of 8 literals or match/length pairs. The
/// given token contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof {
Ok(0)
} else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[0]);
}
#[test]
fn compress_a() {
cmp_test(b"a", &[192, 12, 40]);
}
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[192, 12, 35, 6, 2, 64]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[0], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[192, 12, 40], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[192, 12, 35, 6, 2, 64], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[255, 12, 35, 22, 199, 178, 108, 181, 154, 179, 216, 10, 15, 64, 40, 132, 133, 100, 129, 201, 4, 138, 4],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss2.rs");
roundtrip(input);
}
}
| {
try!(self.emit_flush());
} | conditional_block |
mouse.rs | use libc::{c_int, c_uint, c_void, uint8_t, uint32_t};
use surface::SDL_Surface;
use video::SDL_Window;
use sdl::SDL_bool;
use event::SDL_State;
pub type SDL_Cursor = c_void;
pub type SDL_SystemCursor = c_uint;
pub const SDL_SYSTEM_CURSOR_ARROW: SDL_SystemCursor = 0;
pub const SDL_SYSTEM_CURSOR_IBEAM: SDL_SystemCursor = 1;
pub const SDL_SYSTEM_CURSOR_WAIT: SDL_SystemCursor = 2;
pub const SDL_SYSTEM_CURSOR_CROSSHAIR: SDL_SystemCursor = 3;
pub const SDL_SYSTEM_CURSOR_WAITARROW: SDL_SystemCursor = 4;
pub const SDL_SYSTEM_CURSOR_SIZENWSE: SDL_SystemCursor = 5; | pub const SDL_SYSTEM_CURSOR_SIZENESW: SDL_SystemCursor = 6;
pub const SDL_SYSTEM_CURSOR_SIZEWE: SDL_SystemCursor = 7;
pub const SDL_SYSTEM_CURSOR_SIZENS: SDL_SystemCursor = 8;
pub const SDL_SYSTEM_CURSOR_SIZEALL: SDL_SystemCursor = 9;
pub const SDL_SYSTEM_CURSOR_NO: SDL_SystemCursor = 10;
pub const SDL_SYSTEM_CURSOR_HAND: SDL_SystemCursor = 11;
pub const SDL_NUM_SYSTEM_CURSORS: SDL_SystemCursor = 12;
pub const SDL_BUTTON_LEFT: u8 = 1;
pub const SDL_BUTTON_MIDDLE: u8 = 2;
pub const SDL_BUTTON_RIGHT: u8 = 3;
pub const SDL_BUTTON_X1: u8 = 4;
pub const SDL_BUTTON_X2: u8 = 5;
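// Masks for the button-state bitfield: each button sets bit (button - 1).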
pub const SDL_BUTTON_LMASK: u32 = 0x01;
pub const SDL_BUTTON_MMASK: u32 = 0x02;
pub const SDL_BUTTON_RMASK: u32 = 0x04;
pub const SDL_BUTTON_X1MASK: u32 = 0x08;
pub const SDL_BUTTON_X2MASK: u32 = 0x10;
extern "C" {
pub fn SDL_GetMouseFocus() -> *mut SDL_Window;
pub fn SDL_GetMouseState(x: *mut c_int, y: *mut c_int) -> uint32_t;
pub fn SDL_GetRelativeMouseState(x: *mut c_int, y: *mut c_int) -> uint32_t;
pub fn SDL_WarpMouseInWindow(window: *mut SDL_Window, x: c_int, y: c_int);
pub fn SDL_SetRelativeMouseMode(enabled: SDL_bool) -> c_int;
pub fn SDL_GetRelativeMouseMode() -> SDL_bool;
pub fn SDL_CreateCursor(data: *const uint8_t, mask: *const uint8_t, w: c_int,
h: c_int, hot_x: c_int, hot_y: c_int) ->
*mut SDL_Cursor;
pub fn SDL_CreateColorCursor(surface: *mut SDL_Surface, hot_x: c_int,
hot_y: c_int) -> *mut SDL_Cursor;
pub fn SDL_CreateSystemCursor(id: SDL_SystemCursor) -> *mut SDL_Cursor;
pub fn SDL_SetCursor(cursor: *mut SDL_Cursor);
pub fn SDL_GetCursor() -> *mut SDL_Cursor;
pub fn SDL_GetDefaultCursor() -> *mut SDL_Cursor;
pub fn SDL_FreeCursor(cursor: *mut SDL_Cursor);
pub fn SDL_ShowCursor(toggle: SDL_State) -> SDL_State;
} | random_line_split |
|
must-use-ops.rs | // Issue #50124 - Test warning for unused operator expressions
// check-pass
#![warn(unused_must_use)]
fn | () {
let val = 1;
let val_pointer = &val;
// Comparison Operators
val == 1; //~ WARNING unused comparison
val < 1; //~ WARNING unused comparison
val <= 1; //~ WARNING unused comparison
val != 1; //~ WARNING unused comparison
val >= 1; //~ WARNING unused comparison
val > 1; //~ WARNING unused comparison
// Arithmetic Operators
val + 2; //~ WARNING unused arithmetic operation
val - 2; //~ WARNING unused arithmetic operation
val / 2; //~ WARNING unused arithmetic operation
val * 2; //~ WARNING unused arithmetic operation
val % 2; //~ WARNING unused arithmetic operation
// Logical Operators
true && true; //~ WARNING unused logical operation
false || true; //~ WARNING unused logical operation
// Bitwise Operators
5 ^ val; //~ WARNING unused bitwise operation
5 & val; //~ WARNING unused bitwise operation
5 | val; //~ WARNING unused bitwise operation
5 << val; //~ WARNING unused bitwise operation
5 >> val; //~ WARNING unused bitwise operation
// Unary Operators
!val; //~ WARNING unused unary operation
-val; //~ WARNING unused unary operation
*val_pointer; //~ WARNING unused unary operation
}
| main | identifier_name |
must-use-ops.rs | // Issue #50124 - Test warning for unused operator expressions
// check-pass
#![warn(unused_must_use)]
fn main() | true && true; //~ WARNING unused logical operation
false || true; //~ WARNING unused logical operation
// Bitwise Operators
5 ^ val; //~ WARNING unused bitwise operation
5 & val; //~ WARNING unused bitwise operation
5 | val; //~ WARNING unused bitwise operation
5 << val; //~ WARNING unused bitwise operation
5 >> val; //~ WARNING unused bitwise operation
// Unary Operators
!val; //~ WARNING unused unary operation
-val; //~ WARNING unused unary operation
*val_pointer; //~ WARNING unused unary operation
}
| {
let val = 1;
let val_pointer = &val;
// Comparison Operators
val == 1; //~ WARNING unused comparison
val < 1; //~ WARNING unused comparison
val <= 1; //~ WARNING unused comparison
val != 1; //~ WARNING unused comparison
val >= 1; //~ WARNING unused comparison
val > 1; //~ WARNING unused comparison
// Arithmetic Operators
val + 2; //~ WARNING unused arithmetic operation
val - 2; //~ WARNING unused arithmetic operation
val / 2; //~ WARNING unused arithmetic operation
val * 2; //~ WARNING unused arithmetic operation
val % 2; //~ WARNING unused arithmetic operation
// Logical Operators | identifier_body |
must-use-ops.rs | // Issue #50124 - Test warning for unused operator expressions
// check-pass
#![warn(unused_must_use)]
fn main() {
let val = 1; | val == 1; //~ WARNING unused comparison
val < 1; //~ WARNING unused comparison
val <= 1; //~ WARNING unused comparison
val != 1; //~ WARNING unused comparison
val >= 1; //~ WARNING unused comparison
val > 1; //~ WARNING unused comparison
// Arithmetic Operators
val + 2; //~ WARNING unused arithmetic operation
val - 2; //~ WARNING unused arithmetic operation
val / 2; //~ WARNING unused arithmetic operation
val * 2; //~ WARNING unused arithmetic operation
val % 2; //~ WARNING unused arithmetic operation
// Logical Operators
true && true; //~ WARNING unused logical operation
false || true; //~ WARNING unused logical operation
// Bitwise Operators
5 ^ val; //~ WARNING unused bitwise operation
5 & val; //~ WARNING unused bitwise operation
5 | val; //~ WARNING unused bitwise operation
5 << val; //~ WARNING unused bitwise operation
5 >> val; //~ WARNING unused bitwise operation
// Unary Operators
!val; //~ WARNING unused unary operation
-val; //~ WARNING unused unary operation
*val_pointer; //~ WARNING unused unary operation
} | let val_pointer = &val;
// Comparison Operators | random_line_split |
test.rs | extern crate mio;
extern crate bytes;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate tempdir;
pub use ports::localhost;
mod test_battery;
mod test_close_on_drop;
mod test_echo_server;
mod test_multicast;
mod test_notify;
mod test_register_deregister;
mod test_timer;
mod test_udp_socket;
mod test_unix_echo_server;
mod ports {
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering::SeqCst;
// Helper for getting a unique port for the task run
// TODO: Reuse ports to not spam the system
static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT;
const FIRST_PORT: usize = 18080;
fn next_port() -> usize {
unsafe {
// If the atomic was never used, set it to the initial port
NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst);
// Get and increment the port list
NEXT_PORT.fetch_add(1, SeqCst)
}
}
pub fn localhost() -> SocketAddr {
let s = format!("127.0.0.1:{}", next_port());
FromStr::from_str(&s).unwrap()
}
} |
pub fn sleep_ms(ms: usize) {
use std::thread;
thread::sleep_ms(ms as u32);
} | random_line_split |
|
test.rs | extern crate mio;
extern crate bytes;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate tempdir;
pub use ports::localhost;
mod test_battery;
mod test_close_on_drop;
mod test_echo_server;
mod test_multicast;
mod test_notify;
mod test_register_deregister;
mod test_timer;
mod test_udp_socket;
mod test_unix_echo_server;
mod ports {
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering::SeqCst;
// Helper for getting a unique port for the task run
// TODO: Reuse ports to not spam the system
static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT;
const FIRST_PORT: usize = 18080;
fn next_port() -> usize {
unsafe {
// If the atomic was never used, set it to the initial port
NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst);
// Get and increment the port list
NEXT_PORT.fetch_add(1, SeqCst)
}
}
pub fn | () -> SocketAddr {
let s = format!("127.0.0.1:{}", next_port());
FromStr::from_str(&s).unwrap()
}
}
pub fn sleep_ms(ms: usize) {
use std::thread;
thread::sleep_ms(ms as u32);
}
| localhost | identifier_name |
test.rs | extern crate mio;
extern crate bytes;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate tempdir;
pub use ports::localhost;
mod test_battery;
mod test_close_on_drop;
mod test_echo_server;
mod test_multicast;
mod test_notify;
mod test_register_deregister;
mod test_timer;
mod test_udp_socket;
mod test_unix_echo_server;
mod ports {
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering::SeqCst;
// Helper for getting a unique port for the task run
// TODO: Reuse ports to not spam the system
static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT;
const FIRST_PORT: usize = 18080;
fn next_port() -> usize {
unsafe {
// If the atomic was never used, set it to the initial port
NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst);
// Get and increment the port list
NEXT_PORT.fetch_add(1, SeqCst)
}
}
pub fn localhost() -> SocketAddr {
let s = format!("127.0.0.1:{}", next_port());
FromStr::from_str(&s).unwrap()
}
}
pub fn sleep_ms(ms: usize) | {
use std::thread;
thread::sleep_ms(ms as u32);
} | identifier_body |
|
htmlsourceelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLSourceElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLSourceElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLSourceElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLSourceElement {
htmlelement: HTMLElement
}
impl HTMLSourceElementDerived for EventTarget {
fn is_htmlsourceelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLSourceElementTypeId))
}
}
impl HTMLSourceElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLSourceElement {
HTMLSourceElement {
htmlelement: HTMLElement::new_inherited(HTMLSourceElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn | (localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLSourceElement> {
let element = HTMLSourceElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSourceElementBinding::Wrap)
}
}
impl Reflectable for HTMLSourceElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
| new | identifier_name |
htmlsourceelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLSourceElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLSourceElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLSourceElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLSourceElement {
htmlelement: HTMLElement
}
impl HTMLSourceElementDerived for EventTarget {
fn is_htmlsourceelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLSourceElementTypeId)) | }
impl HTMLSourceElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLSourceElement {
HTMLSourceElement {
htmlelement: HTMLElement::new_inherited(HTMLSourceElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLSourceElement> {
let element = HTMLSourceElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSourceElementBinding::Wrap)
}
}
impl Reflectable for HTMLSourceElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
} | } | random_line_split |
htmlsourceelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLSourceElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLSourceElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLSourceElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLSourceElement {
htmlelement: HTMLElement
}
impl HTMLSourceElementDerived for EventTarget {
fn is_htmlsourceelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLSourceElementTypeId))
}
}
impl HTMLSourceElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLSourceElement {
HTMLSourceElement {
htmlelement: HTMLElement::new_inherited(HTMLSourceElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLSourceElement> {
let element = HTMLSourceElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSourceElementBinding::Wrap)
}
}
impl Reflectable for HTMLSourceElement {
fn reflector<'a>(&'a self) -> &'a Reflector |
}
| {
self.htmlelement.reflector()
} | identifier_body |
mod.rs | // Copyright (c) 2017 Atsushi Miyake
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or http://apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to those terms.
mod enigma;
mod substitution_table;
mod router;
mod router_manager; | mod reflector;
mod plugboard;
mod alphabets;
pub use self::enigma::Enigma;
pub use self::router::Router;
pub use self::router::RouterProtocol;
pub use self::router::Digit;
pub use self::router_manager::RouterManager;
pub use self::reflector::Reflector;
pub use self::substitution_table::SubstitutionTable;
pub use self::plugboard::Plugboard;
pub use self::alphabets::ALPHABETS;
pub use self::alphabets::SUBSTITUTION_TABLE1;
pub use self::alphabets::SUBSTITUTION_TABLE2;
pub use self::alphabets::SUBSTITUTION_TABLE3;
pub use self::alphabets::REFLECTOR;
pub use self::alphabets::PLUGBOARD; | random_line_split |
|
unknown.rs | // Copyright (c) 2016 com-rs developers
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
use std::os::raw::c_void;
use super::{AsComPtr, HResult, IID};
/// Base interface for all COM types.
///
/// None of the methods on this struct should be called directly,
/// use [`ComPtr`](struct.ComPtr.html) instead.
#[derive(Debug)]
#[repr(C)]
pub struct IUnknown {
vtable: *const IUnknownVtbl
}
#[allow(missing_debug_implementations)]
#[repr(C)]
#[doc(hidden)]
pub struct | {
query_interface: extern "stdcall" fn(
*const IUnknown, &IID, *mut *mut c_void) -> HResult,
add_ref: extern "stdcall" fn(*const IUnknown) -> u32,
release: extern "stdcall" fn(*const IUnknown) -> u32
}
extern {
static IID_IUnknown: IID;
}
impl IUnknown {
/// Retrieves pointers to the supported interfaces on an object.
/// Use [`ComPtr::from`](struct.ComPtr.html#method.from) instead.
pub unsafe fn query_interface(&self, iid: &IID, object: *mut *mut c_void)
-> HResult {
((*self.vtable).query_interface)(self, iid, object)
}
/// Increments the reference count for an interface on an object.
/// Should never need to call this directly.
pub unsafe fn add_ref(&self) -> u32 {
((*self.vtable).add_ref)(self)
}
/// Decrements the reference count for an interface on an object.
/// Should never need to call this directly.
pub unsafe fn release(&self) -> u32 {
((*self.vtable).release)(self)
}
}
unsafe impl AsComPtr<IUnknown> for IUnknown { }
unsafe impl ::ComInterface for IUnknown {
#[doc(hidden)]
type Vtable = IUnknownVtbl;
fn iid() -> ::IID { unsafe { IID_IUnknown } }
}
| IUnknownVtbl | identifier_name |
unknown.rs | // Copyright (c) 2016 com-rs developers
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
use std::os::raw::c_void; | ///
/// None of the methods on this struct should be called directly,
/// use [`ComPtr`](struct.ComPtr.html) instead.
#[derive(Debug)]
#[repr(C)]
pub struct IUnknown {
vtable: *const IUnknownVtbl
}
#[allow(missing_debug_implementations)]
#[repr(C)]
#[doc(hidden)]
pub struct IUnknownVtbl {
query_interface: extern "stdcall" fn(
*const IUnknown, &IID, *mut *mut c_void) -> HResult,
add_ref: extern "stdcall" fn(*const IUnknown) -> u32,
release: extern "stdcall" fn(*const IUnknown) -> u32
}
extern {
static IID_IUnknown: IID;
}
impl IUnknown {
/// Retrieves pointers to the supported interfaces on an object.
/// Use [`ComPtr::from`](struct.ComPtr.html#method.from) instead.
pub unsafe fn query_interface(&self, iid: &IID, object: *mut *mut c_void)
-> HResult {
((*self.vtable).query_interface)(self, iid, object)
}
/// Increments the reference count for an interface on an object.
/// Should never need to call this directly.
pub unsafe fn add_ref(&self) -> u32 {
((*self.vtable).add_ref)(self)
}
/// Decrements the reference count for an interface on an object.
/// Should never need to call this directly.
pub unsafe fn release(&self) -> u32 {
((*self.vtable).release)(self)
}
}
unsafe impl AsComPtr<IUnknown> for IUnknown { }
unsafe impl ::ComInterface for IUnknown {
#[doc(hidden)]
type Vtable = IUnknownVtbl;
fn iid() -> ::IID { unsafe { IID_IUnknown } }
} |
use super::{AsComPtr, HResult, IID};
/// Base interface for all COM types. | random_line_split |
cross-crate-trait-method.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// compile-flags:-Zprint-mono-items=eager
#![deny(dead_code)]
#![feature(start)]
// aux-build:cgu_export_trait_method.rs
extern crate cgu_export_trait_method;
use cgu_export_trait_method::Trait;
//~ MONO_ITEM fn cross_crate_trait_method::start[0]
#[start]
fn | (_: isize, _: *const *const u8) -> isize {
// The object code of these methods is contained in the external crate, so
// calling them should *not* introduce codegen items in the current crate.
let _: (u32, u32) = Trait::without_default_impl(0);
let _: (char, u32) = Trait::without_default_impl(0);
// Currently, no object code is generated for trait methods with default
// implementations, unless they are actually called from somewhere. Therefore
// we cannot import the implementations and have to create our own inline.
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<u32>
let _ = Trait::with_default_impl(0u32);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<char>
let _ = Trait::with_default_impl('c');
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, &str>
let _ = Trait::with_default_impl_generic(0u32, "abc");
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, bool>
let _ = Trait::with_default_impl_generic(0u32, false);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i16>
let _ = Trait::with_default_impl_generic('x', 1i16);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i32>
let _ = Trait::with_default_impl_generic('y', 0i32);
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<char>
let _: (u32, char) = Trait::without_default_impl_generic('c');
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<bool>
let _: (u32, bool) = Trait::without_default_impl_generic(false);
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<char>
let _: (char, char) = Trait::without_default_impl_generic('c');
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<bool>
let _: (char, bool) = Trait::without_default_impl_generic(false);
0
}
| start | identifier_name |
cross-crate-trait-method.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// compile-flags:-Zprint-mono-items=eager
#![deny(dead_code)]
#![feature(start)]
// aux-build:cgu_export_trait_method.rs
extern crate cgu_export_trait_method;
use cgu_export_trait_method::Trait;
//~ MONO_ITEM fn cross_crate_trait_method::start[0]
#[start]
fn start(_: isize, _: *const *const u8) -> isize |
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i16>
let _ = Trait::with_default_impl_generic('x', 1i16);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i32>
let _ = Trait::with_default_impl_generic('y', 0i32);
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<char>
let _: (u32, char) = Trait::without_default_impl_generic('c');
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<bool>
let _: (u32, bool) = Trait::without_default_impl_generic(false);
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<char>
let _: (char, char) = Trait::without_default_impl_generic('c');
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<bool>
let _: (char, bool) = Trait::without_default_impl_generic(false);
0
}
| {
// The object code of these methods is contained in the external crate, so
// calling them should *not* introduce codegen items in the current crate.
let _: (u32, u32) = Trait::without_default_impl(0);
let _: (char, u32) = Trait::without_default_impl(0);
// Currently, no object code is generated for trait methods with default
// implementations, unless they are actually called from somewhere. Therefore
// we cannot import the implementations and have to create our own inline.
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<u32>
let _ = Trait::with_default_impl(0u32);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<char>
let _ = Trait::with_default_impl('c');
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, &str>
let _ = Trait::with_default_impl_generic(0u32, "abc");
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, bool>
let _ = Trait::with_default_impl_generic(0u32, false); | identifier_body |
cross-crate-trait-method.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | // except according to those terms.
// ignore-tidy-linelength
// compile-flags:-Zprint-mono-items=eager
#![deny(dead_code)]
#![feature(start)]
// aux-build:cgu_export_trait_method.rs
extern crate cgu_export_trait_method;
use cgu_export_trait_method::Trait;
//~ MONO_ITEM fn cross_crate_trait_method::start[0]
#[start]
fn start(_: isize, _: *const *const u8) -> isize {
// The object code of these methods is contained in the external crate, so
// calling them should *not* introduce codegen items in the current crate.
let _: (u32, u32) = Trait::without_default_impl(0);
let _: (char, u32) = Trait::without_default_impl(0);
// Currently, no object code is generated for trait methods with default
// implementations, unless they are actually called from somewhere. Therefore
// we cannot import the implementations and have to create our own inline.
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<u32>
let _ = Trait::with_default_impl(0u32);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<char>
let _ = Trait::with_default_impl('c');
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, &str>
let _ = Trait::with_default_impl_generic(0u32, "abc");
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<u32, bool>
let _ = Trait::with_default_impl_generic(0u32, false);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i16>
let _ = Trait::with_default_impl_generic('x', 1i16);
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl_generic[0]<char, i32>
let _ = Trait::with_default_impl_generic('y', 0i32);
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<char>
let _: (u32, char) = Trait::without_default_impl_generic('c');
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[1]::without_default_impl_generic[0]<bool>
let _: (u32, bool) = Trait::without_default_impl_generic(false);
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<char>
let _: (char, char) = Trait::without_default_impl_generic('c');
//~ MONO_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<bool>
let _: (char, bool) = Trait::without_default_impl_generic(false);
0
} | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | random_line_split |
tumor_mutational_burden.rs | use std::collections::BTreeMap;
use std::str;
use std::str::FromStr;
use anyhow::Result;
use bio::stats::{LogProb, PHREDProb};
use itertools::Itertools;
use itertools_num::linspace;
use rust_htslib::bcf::{self, Read};
use serde_json::{json, Value};
use crate::errors;
use crate::variants::model::AlleleFreq;
use crate::{Event, SimpleEvent};
/// Consider only variants in coding regions.
/// We rely on the ANN field for this.
fn | (rec: &mut bcf::Record) -> Result<bool> {
for ann in rec
.info(b"ANN")
.string()?
.expect("ANN field not found. Annotate VCF with e.g. snpEff.")
.iter()
{
let mut coding = false;
for (i, entry) in ann.split(|c| *c == b'|').enumerate() {
if i == 7 {
coding = entry == b"protein_coding";
}
if i == 13 {
coding &= entry != b"";
}
}
if coding {
return Ok(true);
}
}
Ok(false)
}
#[derive(Debug, Clone, Serialize)]
struct TMB {
min_vaf: f64,
tmb: f64,
}
#[derive(Debug, Clone, Serialize)]
struct TMBStrat {
min_vaf: f64,
tmb: f64,
vartype: Vartype,
}
#[derive(Debug, Clone, Serialize)]
struct TMBBin {
vaf: f64,
tmb: f64,
vartype: Vartype,
}
struct Record {
prob: LogProb,
vartype: Vartype,
}
#[derive(
Display,
Debug,
Clone,
Copy,
Serialize,
Deserialize,
EnumString,
EnumIter,
IntoStaticStr,
EnumVariantNames,
)]
#[strum(serialize_all = "kebab_case")]
pub enum PlotMode {
Hist,
Curve,
}
/// Estimate tumor mutational burden based on Varlociraptor calls from STDIN and print result to STDOUT.
pub(crate) fn estimate(
somatic_tumor_events: &[String],
tumor_name: &str,
coding_genome_size: u64,
mode: PlotMode,
) -> Result<()> {
let mut bcf = bcf::Reader::from_stdin()?;
let header = bcf.header().to_owned();
let tumor_id = bcf
.header()
.sample_id(tumor_name.as_bytes())
.unwrap_or_else(|| panic!("Sample {} not found", tumor_name)); // TODO throw a proper error
let mut tmb = BTreeMap::new();
'records: loop {
let mut rec = bcf.empty_record();
match bcf.read(&mut rec) {
None => break,
Some(res) => res?,
}
let contig = str::from_utf8(header.rid2name(rec.rid().unwrap()).unwrap())?;
let vcfpos = rec.pos() + 1;
// obtain VAF estimates (do it here already to work around a segfault in htslib)
let vafs = rec.format(b"AF").float()?[tumor_id].to_owned();
if !is_valid_variant(&mut rec)? {
info!(
"Skipping variant {}:{} because it is not coding.",
contig, vcfpos
);
continue;
}
let alt_allele_count = (rec.allele_count() - 1) as usize;
// collect allele probabilities for given events
let mut allele_probs = vec![LogProb::ln_zero(); alt_allele_count];
for e in somatic_tumor_events {
let e = SimpleEvent { name: e.to_owned() };
let tag_name = e.tag_name("PROB");
if let Some(probs) = rec.info(tag_name.as_bytes()).float()? {
for i in 0..alt_allele_count {
allele_probs[i] =
allele_probs[i].ln_add_exp(LogProb::from(PHREDProb(probs[i] as f64)));
}
} else {
info!(
"Skipping variant {}:{} because it does not contain the required INFO tag {}.",
contig, vcfpos, tag_name
);
continue 'records;
}
}
let vartypes = vartypes(&rec);
// push into TMB function
for i in 0..alt_allele_count {
let vaf = AlleleFreq(vafs[i] as f64);
let entry = tmb.entry(vaf).or_insert_with(Vec::new);
entry.push(Record {
prob: allele_probs[i],
vartype: vartypes[i],
});
}
}
if tmb.is_empty() {
return Err(errors::Error::NoRecordsFound.into());
}
let calc_tmb = |probs: &[LogProb]| -> f64 {
let count = LogProb::ln_sum_exp(probs).exp();
// Expected number of variants with VAF>=min_vaf per megabase.
(count / coding_genome_size as f64) * 1000000.0
};
let print_plot =
|data: serde_json::Value, blueprint: &str, cutpoint_tmb: f64, max_tmb: f64| -> Result<()> {
let mut blueprint = serde_json::from_str(blueprint)?;
if let Value::Object(ref mut blueprint) = blueprint {
blueprint["data"]["values"] = data;
blueprint["vconcat"][0]["encoding"]["y"]["scale"]["domain"] =
json!([cutpoint_tmb, max_tmb]);
blueprint["vconcat"][1]["encoding"]["y"]["scale"]["domain"] =
json!([0.0, cutpoint_tmb]);
// print to STDOUT
println!("{}", serde_json::to_string_pretty(blueprint)?);
Ok(())
} else {
unreachable!();
}
};
let min_vafs = linspace(0.0, 1.0, 100).map(AlleleFreq);
match mode {
PlotMode::Hist => {
let mut plot_data = Vec::new();
// perform binning for histogram
let mut max_tmbs = Vec::new();
let mut cutpoint_tmbs = Vec::new();
for (i, center_vaf) in linspace(0.05, 0.95, 19).enumerate() {
let groups = tmb
.range(AlleleFreq(center_vaf - 0.05)..AlleleFreq(center_vaf + 0.05))
.map(|(_, records)| records)
.flatten()
.map(|record| (record.vartype, record.prob))
.into_group_map();
for (vartype, probs) in groups {
let tmb = calc_tmb(&probs);
if i == 0 {
max_tmbs.push(tmb);
}
// cutpoint beyond 15%
if i == 2 {
cutpoint_tmbs.push(tmb);
}
plot_data.push(TMBBin {
vaf: center_vaf,
tmb,
vartype,
});
}
}
let max_tmb: f64 = max_tmbs.iter().sum();
let cutpoint_tmb: f64 = cutpoint_tmbs.iter().sum();
print_plot(
json!(plot_data),
include_str!("../../templates/plots/vaf_hist.json"),
cutpoint_tmb,
max_tmb,
)
}
PlotMode::Curve => {
let mut plot_data = Vec::new();
let mut max_tmbs = Vec::new();
let mut cutpoint_tmbs = Vec::new();
// calculate TMB function (expected number of somatic variants per minimum allele frequency)
for (i, min_vaf) in min_vafs.enumerate() {
let groups = tmb
.range(min_vaf..)
.map(|(_, records)| records)
.flatten()
.map(|record| (record.vartype, record.prob))
.into_group_map();
for (vartype, probs) in groups {
let tmb = calc_tmb(&probs);
if i == 0 {
max_tmbs.push(tmb);
}
if i == 10 {
cutpoint_tmbs.push(tmb);
}
plot_data.push(TMBStrat {
min_vaf: *min_vaf,
tmb,
vartype,
});
}
}
let max_tmb: f64 = max_tmbs.iter().sum();
let cutpoint_tmb: f64 = cutpoint_tmbs.iter().sum();
print_plot(
json!(plot_data),
include_str!("../../templates/plots/vaf_curve_strat.json"),
cutpoint_tmb,
max_tmb,
)
}
}
}
#[derive(
Display,
Debug,
Clone,
Copy,
Serialize,
Deserialize,
EnumString,
EnumIter,
IntoStaticStr,
PartialEq,
Hash,
Eq,
)]
pub(crate) enum Vartype {
DEL,
INS,
INV,
DUP,
BND,
MNV,
Complex,
#[strum(serialize = "A>C")]
#[serde(rename = "A>C")]
AC,
#[strum(serialize = "A>G")]
#[serde(rename = "A>G")]
AG,
#[strum(serialize = "A>T")]
#[serde(rename = "A>T")]
AT,
#[strum(serialize = "C>A")]
#[serde(rename = "C>A")]
CA,
#[strum(serialize = "C>G")]
#[serde(rename = "C>G")]
CG,
#[strum(serialize = "C>T")]
#[serde(rename = "C>T")]
CT,
#[strum(serialize = "G>A")]
#[serde(rename = "G>A")]
GA,
#[strum(serialize = "G>C")]
#[serde(rename = "G>C")]
GC,
#[strum(serialize = "G>T")]
#[serde(rename = "G>T")]
GT,
#[strum(serialize = "T>A")]
#[serde(rename = "T>A")]
TA,
#[strum(serialize = "T>C")]
#[serde(rename = "T>C")]
TC,
#[strum(serialize = "T>G")]
#[serde(rename = "T>G")]
TG,
}
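/// Classifies each ALT allele of the record as a single-base substitution (e.g. A>C), an INS, DEL,
/// MNV or Complex event, or one of the symbolic variants (<DEL>, <INV>, <DUP>, <BND>).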
pub(crate) fn vartypes(record: &bcf::Record) -> Vec<Vartype> {
let ref_allele = record.alleles()[0];
record.alleles()[1..]
.iter()
.map(|alt_allele| {
if alt_allele == b"<DEL>" {
Vartype::DEL
} else if alt_allele == b"<INV>" {
Vartype::INV
} else if alt_allele == b"<DUP>" {
Vartype::DUP
} else if alt_allele == b"<BND>" {
Vartype::BND
} else if ref_allele.len() == 1 && alt_allele.len() == 1 {
Vartype::from_str(&format!(
"{}>{}",
str::from_utf8(ref_allele).unwrap(),
str::from_utf8(alt_allele).unwrap()
))
.unwrap()
} else if ref_allele.len() > 1 && alt_allele.len() == 1 {
Vartype::DEL
} else if ref_allele.len() == 1 && alt_allele.len() > 1 {
Vartype::INS
} else if ref_allele.len() == alt_allele.len() && ref_allele.len() > 1 {
Vartype::MNV
} else {
Vartype::Complex
}
})
.collect_vec()
}
| is_valid_variant | identifier_name |
tumor_mutational_burden.rs | use std::collections::BTreeMap;
use std::str;
use std::str::FromStr;
use anyhow::Result;
use bio::stats::{LogProb, PHREDProb};
use itertools::Itertools;
use itertools_num::linspace;
use rust_htslib::bcf::{self, Read};
use serde_json::{json, Value};
use crate::errors;
use crate::variants::model::AlleleFreq;
use crate::{Event, SimpleEvent};
/// Consider only variants in coding regions.
/// We rely on the ANN field for this.
fn is_valid_variant(rec: &mut bcf::Record) -> Result<bool> {
for ann in rec
.info(b"ANN")
.string()?
.expect("ANN field not found. Annotate VCF with e.g. snpEff.")
.iter()
{
let mut coding = false;
for (i, entry) in ann.split(|c| *c == b'|').enumerate() {
if i == 7 {
coding = entry == b"protein_coding";
}
if i == 13 {
coding &= entry != b"";
}
}
if coding {
return Ok(true);
}
}
Ok(false)
}
#[derive(Debug, Clone, Serialize)]
struct TMB {
min_vaf: f64,
tmb: f64,
}
| struct TMBStrat {
min_vaf: f64,
tmb: f64,
vartype: Vartype,
}
#[derive(Debug, Clone, Serialize)]
struct TMBBin {
vaf: f64,
tmb: f64,
vartype: Vartype,
}
struct Record {
prob: LogProb,
vartype: Vartype,
}
#[derive(
Display,
Debug,
Clone,
Copy,
Serialize,
Deserialize,
EnumString,
EnumIter,
IntoStaticStr,
EnumVariantNames,
)]
#[strum(serialize_all = "kebab_case")]
pub enum PlotMode {
Hist,
Curve,
}
/// Estimate tumor mutational burden based on Varlociraptor calls from STDIN and print result to STDOUT.
pub(crate) fn estimate(
somatic_tumor_events: &[String],
tumor_name: &str,
coding_genome_size: u64,
mode: PlotMode,
) -> Result<()> {
let mut bcf = bcf::Reader::from_stdin()?;
let header = bcf.header().to_owned();
let tumor_id = bcf
.header()
.sample_id(tumor_name.as_bytes())
.unwrap_or_else(|| panic!("Sample {} not found", tumor_name)); // TODO throw a proper error
let mut tmb = BTreeMap::new();
'records: loop {
let mut rec = bcf.empty_record();
match bcf.read(&mut rec) {
None => break,
Some(res) => res?,
}
let contig = str::from_utf8(header.rid2name(rec.rid().unwrap()).unwrap())?;
let vcfpos = rec.pos() + 1;
// obtain VAF estimates (do it here already to work around a segfault in htslib)
let vafs = rec.format(b"AF").float()?[tumor_id].to_owned();
if !is_valid_variant(&mut rec)? {
info!(
"Skipping variant {}:{} because it is not coding.",
contig, vcfpos
);
continue;
}
let alt_allele_count = (rec.allele_count() - 1) as usize;
// collect allele probabilities for given events
let mut allele_probs = vec![LogProb::ln_zero(); alt_allele_count];
for e in somatic_tumor_events {
let e = SimpleEvent { name: e.to_owned() };
let tag_name = e.tag_name("PROB");
if let Some(probs) = rec.info(tag_name.as_bytes()).float()? {
for i in 0..alt_allele_count {
allele_probs[i] =
allele_probs[i].ln_add_exp(LogProb::from(PHREDProb(probs[i] as f64)));
}
} else {
info!(
"Skipping variant {}:{} because it does not contain the required INFO tag {}.",
contig, vcfpos, tag_name
);
continue 'records;
}
}
let vartypes = vartypes(&rec);
// push into TMB function
for i in 0..alt_allele_count {
let vaf = AlleleFreq(vafs[i] as f64);
let entry = tmb.entry(vaf).or_insert_with(Vec::new);
entry.push(Record {
prob: allele_probs[i],
vartype: vartypes[i],
});
}
}
if tmb.is_empty() {
return Err(errors::Error::NoRecordsFound.into());
}
let calc_tmb = |probs: &[LogProb]| -> f64 {
let count = LogProb::ln_sum_exp(probs).exp();
// Expected number of variants with VAF>=min_vaf per megabase.
(count / coding_genome_size as f64) * 1000000.0
};
let print_plot =
|data: serde_json::Value, blueprint: &str, cutpoint_tmb: f64, max_tmb: f64| -> Result<()> {
let mut blueprint = serde_json::from_str(blueprint)?;
if let Value::Object(ref mut blueprint) = blueprint {
blueprint["data"]["values"] = data;
blueprint["vconcat"][0]["encoding"]["y"]["scale"]["domain"] =
json!([cutpoint_tmb, max_tmb]);
blueprint["vconcat"][1]["encoding"]["y"]["scale"]["domain"] =
json!([0.0, cutpoint_tmb]);
// print to STDOUT
println!("{}", serde_json::to_string_pretty(blueprint)?);
Ok(())
} else {
unreachable!();
}
};
let min_vafs = linspace(0.0, 1.0, 100).map(AlleleFreq);
match mode {
PlotMode::Hist => {
let mut plot_data = Vec::new();
// perform binning for histogram
let mut max_tmbs = Vec::new();
let mut cutpoint_tmbs = Vec::new();
for (i, center_vaf) in linspace(0.05, 0.95, 19).enumerate() {
let groups = tmb
.range(AlleleFreq(center_vaf - 0.05)..AlleleFreq(center_vaf + 0.05))
.map(|(_, records)| records)
.flatten()
.map(|record| (record.vartype, record.prob))
.into_group_map();
for (vartype, probs) in groups {
let tmb = calc_tmb(&probs);
if i == 0 {
max_tmbs.push(tmb);
}
// cutpoint beyond 15%
if i == 2 {
cutpoint_tmbs.push(tmb);
}
plot_data.push(TMBBin {
vaf: center_vaf,
tmb,
vartype,
});
}
}
let max_tmb: f64 = max_tmbs.iter().sum();
let cutpoint_tmb: f64 = cutpoint_tmbs.iter().sum();
print_plot(
json!(plot_data),
include_str!("../../templates/plots/vaf_hist.json"),
cutpoint_tmb,
max_tmb,
)
}
PlotMode::Curve => {
let mut plot_data = Vec::new();
let mut max_tmbs = Vec::new();
let mut cutpoint_tmbs = Vec::new();
// calculate TMB function (expected number of somatic variants per minimum allele frequency)
for (i, min_vaf) in min_vafs.enumerate() {
let groups = tmb
.range(min_vaf..)
.map(|(_, records)| records)
.flatten()
.map(|record| (record.vartype, record.prob))
.into_group_map();
for (vartype, probs) in groups {
let tmb = calc_tmb(&probs);
if i == 0 {
max_tmbs.push(tmb);
}
if i == 10 {
cutpoint_tmbs.push(tmb);
}
plot_data.push(TMBStrat {
min_vaf: *min_vaf,
tmb,
vartype,
});
}
}
let max_tmb: f64 = max_tmbs.iter().sum();
let cutpoint_tmb: f64 = cutpoint_tmbs.iter().sum();
print_plot(
json!(plot_data),
include_str!("../../templates/plots/vaf_curve_strat.json"),
cutpoint_tmb,
max_tmb,
)
}
}
}
#[derive(
Display,
Debug,
Clone,
Copy,
Serialize,
Deserialize,
EnumString,
EnumIter,
IntoStaticStr,
PartialEq,
Hash,
Eq,
)]
pub(crate) enum Vartype {
DEL,
INS,
INV,
DUP,
BND,
MNV,
Complex,
#[strum(serialize = "A>C")]
#[serde(rename = "A>C")]
AC,
#[strum(serialize = "A>G")]
#[serde(rename = "A>G")]
AG,
#[strum(serialize = "A>T")]
#[serde(rename = "A>T")]
AT,
#[strum(serialize = "C>A")]
#[serde(rename = "C>A")]
CA,
#[strum(serialize = "C>G")]
#[serde(rename = "C>G")]
CG,
#[strum(serialize = "C>T")]
#[serde(rename = "C>T")]
CT,
#[strum(serialize = "G>A")]
#[serde(rename = "G>A")]
GA,
#[strum(serialize = "G>C")]
#[serde(rename = "G>C")]
GC,
#[strum(serialize = "G>T")]
#[serde(rename = "G>T")]
GT,
#[strum(serialize = "T>A")]
#[serde(rename = "T>A")]
TA,
#[strum(serialize = "T>C")]
#[serde(rename = "T>C")]
TC,
#[strum(serialize = "T>G")]
#[serde(rename = "T>G")]
TG,
}
pub(crate) fn vartypes(record: &bcf::Record) -> Vec<Vartype> {
let ref_allele = record.alleles()[0];
record.alleles()[1..]
.iter()
.map(|alt_allele| {
if alt_allele == b"<DEL>" {
Vartype::DEL
} else if alt_allele == b"<INV>" {
Vartype::INV
} else if alt_allele == b"<DUP>" {
Vartype::DUP
} else if alt_allele == b"<BND>" {
Vartype::BND
} else if ref_allele.len() == 1 && alt_allele.len() == 1 {
Vartype::from_str(&format!(
"{}>{}",
str::from_utf8(ref_allele).unwrap(),
str::from_utf8(alt_allele).unwrap()
))
.unwrap()
} else if ref_allele.len() > 1 && alt_allele.len() == 1 {
Vartype::DEL
} else if ref_allele.len() == 1 && alt_allele.len() > 1 {
Vartype::INS
} else if ref_allele.len() == alt_allele.len() && ref_allele.len() > 1 {
Vartype::MNV
} else {
Vartype::Complex
}
})
.collect_vec()
} | #[derive(Debug, Clone, Serialize)] | random_line_split |
build.rs | use std::env;
use std::path::{Path, PathBuf};
use std::process::Command;
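// Returns the value of GUROBI_HOME, panicking if it is unset or does not point to an existing path.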
fn gurobi_home() -> String {
let var = env::var("GUROBI_HOME").expect("failed to retrieve the value of GUROBI_HOME");
if !Path::new(&var).exists() {
panic!("GUROBI_HOME is invalid path");
}
var
}
fn append_path(addpath: PathBuf) |
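// Runs `gurobi_cl --version` (with $GUROBI_HOME/bin appended to PATH) and parses the fourth
// whitespace-separated token of its output (e.g. "9.5.2") into a (major, minor, patch) triple.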
fn get_version_triple() -> (i32, i32, i32) {
append_path(PathBuf::from(gurobi_home()).join("bin"));
let output = Command::new("gurobi_cl").arg("--version").output().expect("failed to execute gurobi_cl");
let verno: Vec<_> = String::from_utf8_lossy(&output.stdout)
.into_owned()
.split_whitespace()
.nth(3)
.expect("failed to get version string")
.split(".")
.map(|s| s.parse().expect("failed to parse version tuple"))
.collect();
(verno[0], verno[1], verno[2])
}
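// Emits the cargo directives that link against the versioned Gurobi library,
// e.g. `gurobi95` for a 9.5.x installation.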
fn main() {
let gurobi_home = gurobi_home();
let libpath: PathBuf = PathBuf::from(gurobi_home).join("lib");
let (major, minor, _) = get_version_triple();
let libname = format!("gurobi{}{}", major, minor);
println!("cargo:rustc-link-search=native={}", libpath.display());
println!("cargo:rustc-link-lib={}", libname);
}
| {
let path = env::var_os("PATH").expect("failed to retrieve the value of PATH");
let mut paths: Vec<_> = env::split_paths(&path).collect();
paths.push(addpath);
let new_path = env::join_paths(paths).unwrap();
env::set_var("PATH", &new_path);
} | identifier_body |
build.rs | use std::env;
use std::path::{Path, PathBuf};
use std::process::Command;
fn gurobi_home() -> String {
let var = env::var("GUROBI_HOME").expect("failed to retrieve the value of GUROBI_HOME");
if !Path::new(&var).exists() |
var
}
fn append_path(addpath: PathBuf) {
let path = env::var_os("PATH").expect("failed to retrieve the value of PATH");
let mut paths: Vec<_> = env::split_paths(&path).collect();
paths.push(addpath);
let new_path = env::join_paths(paths).unwrap();
env::set_var("PATH", &new_path);
}
fn get_version_triple() -> (i32, i32, i32) {
append_path(PathBuf::from(gurobi_home()).join("bin"));
let output = Command::new("gurobi_cl").arg("--version").output().expect("failed to execute gurobi_cl");
let verno: Vec<_> = String::from_utf8_lossy(&output.stdout)
.into_owned()
.split_whitespace()
.nth(3)
.expect("failed to get version string")
.split(".")
.map(|s| s.parse().expect("failed to parse version tuple"))
.collect();
(verno[0], verno[1], verno[2])
}
fn main() {
let gurobi_home = gurobi_home();
let libpath: PathBuf = PathBuf::from(gurobi_home).join("lib");
let (major, minor, _) = get_version_triple();
let libname = format!("gurobi{}{}", major, minor);
println!("cargo:rustc-link-search=native={}", libpath.display());
println!("cargo:rustc-link-lib={}", libname);
}
| {
panic!("GUROBI_HOME is invalid path");
} | conditional_block |
build.rs | use std::env;
use std::path::{Path, PathBuf};
use std::process::Command;
fn gurobi_home() -> String {
let var = env::var("GUROBI_HOME").expect("failed to retrieve the value of GUROBI_HOME");
if !Path::new(&var).exists() {
panic!("GUROBI_HOME is invalid path");
}
var
}
fn append_path(addpath: PathBuf) {
let path = env::var_os("PATH").expect("failed to retrieve the value of PATH");
let mut paths: Vec<_> = env::split_paths(&path).collect();
paths.push(addpath);
let new_path = env::join_paths(paths).unwrap();
env::set_var("PATH", &new_path);
} | fn get_version_triple() -> (i32, i32, i32) {
append_path(PathBuf::from(gurobi_home()).join("bin"));
let output = Command::new("gurobi_cl").arg("--version").output().expect("failed to execute gurobi_cl");
let verno: Vec<_> = String::from_utf8_lossy(&output.stdout)
.into_owned()
.split_whitespace()
.nth(3)
.expect("failed to get version string")
.split(".")
.map(|s| s.parse().expect("failed to parse version tuple"))
.collect();
(verno[0], verno[1], verno[2])
}
fn main() {
let gurobi_home = gurobi_home();
let libpath: PathBuf = PathBuf::from(gurobi_home).join("lib");
let (major, minor, _) = get_version_triple();
let libname = format!("gurobi{}{}", major, minor);
println!("cargo:rustc-link-search=native={}", libpath.display());
println!("cargo:rustc-link-lib={}", libname);
} | random_line_split |
|
build.rs | use std::env;
use std::path::{Path, PathBuf};
use std::process::Command;
fn gurobi_home() -> String {
let var = env::var("GUROBI_HOME").expect("failed to retrieve the value of GUROBI_HOME");
if !Path::new(&var).exists() {
panic!("GUROBI_HOME is invalid path");
}
var
}
fn | (addpath: PathBuf) {
let path = env::var_os("PATH").expect("failed to retrieve the value of PATH");
let mut paths: Vec<_> = env::split_paths(&path).collect();
paths.push(addpath);
let new_path = env::join_paths(paths).unwrap();
env::set_var("PATH", &new_path);
}
fn get_version_triple() -> (i32, i32, i32) {
append_path(PathBuf::from(gurobi_home()).join("bin"));
let output = Command::new("gurobi_cl").arg("--version").output().expect("failed to execute gurobi_cl");
let verno: Vec<_> = String::from_utf8_lossy(&output.stdout)
.into_owned()
.split_whitespace()
.nth(3)
.expect("failed to get version string")
.split(".")
.map(|s| s.parse().expect("failed to parse version tuple"))
.collect();
(verno[0], verno[1], verno[2])
}
fn main() {
let gurobi_home = gurobi_home();
let libpath: PathBuf = PathBuf::from(gurobi_home).join("lib");
let (major, minor, _) = get_version_triple();
let libname = format!("gurobi{}{}", major, minor);
println!("cargo:rustc-link-search=native={}", libpath.display());
println!("cargo:rustc-link-lib={}", libname);
}
| append_path | identifier_name |
camera.rs | extern crate cgmath;
use self::cgmath::{Point3, Vector3, Matrix4, SquareMatrix, Deg};
#[derive(Copy, Clone)]
/// Holds view and projection matrices.
///
/// See `Scene`.
pub struct Camera {
view_matrix: Matrix4<f32>,
proj_matrix: Matrix4<f32>,
// Reduces draw call computations
vp_matrix: Matrix4<f32>
}
impl Camera {
/// Create a new camera.
pub fn new() -> Camera {
return Camera {
view_matrix: Matrix4::identity(),
proj_matrix: Matrix4::identity(),
vp_matrix: Matrix4::identity()
};
}
/// Create a new `Camera` from view and projection matrices.
pub fn from_matrices(view_matrix: Matrix4<f32>, proj_matrix: Matrix4<f32>) -> Camera {
return Camera {
view_matrix: view_matrix,
proj_matrix: proj_matrix,
vp_matrix: proj_matrix * view_matrix
};
}
/// Get VP matrix.
pub fn vp_matrix(&self) -> Matrix4<f32> {
return self.vp_matrix;
}
/// Update the view matrix.
pub fn look_at(&mut self, eye: Point3<f32>, center: Point3<f32>, up: Vector3<f32>) {
self.view_matrix = Matrix4::look_at(eye, center, up);
self.vp_matrix = self.proj_matrix * self.view_matrix;
}
/// Update the projection matrix.
pub fn perspective(&mut self, fovy: f32, aspect: f32, near: f32, far: f32) |
}
| {
self.proj_matrix = cgmath::perspective(Deg::new(fovy), aspect, near, far);
self.vp_matrix = self.proj_matrix * self.view_matrix;
} | identifier_body |
camera.rs | extern crate cgmath;
use self::cgmath::{Point3, Vector3, Matrix4, SquareMatrix, Deg};
#[derive(Copy, Clone)]
/// Holds view and projection matrices.
///
/// See `Scene`.
pub struct Camera {
view_matrix: Matrix4<f32>,
proj_matrix: Matrix4<f32>,
// Reduces draw call computations
vp_matrix: Matrix4<f32>
}
impl Camera {
/// Create a new camera.
pub fn new() -> Camera {
return Camera {
view_matrix: Matrix4::identity(),
proj_matrix: Matrix4::identity(),
vp_matrix: Matrix4::identity()
};
}
/// Create a new `Camera` from view and projection matrices.
pub fn from_matrices(view_matrix: Matrix4<f32>, proj_matrix: Matrix4<f32>) -> Camera {
return Camera {
view_matrix: view_matrix,
proj_matrix: proj_matrix,
vp_matrix: proj_matrix * view_matrix
};
}
/// Get VP matrix.
pub fn | (&self) -> Matrix4<f32> {
return self.vp_matrix;
}
/// Update the view matrix.
pub fn look_at(&mut self, eye: Point3<f32>, center: Point3<f32>, up: Vector3<f32>) {
self.view_matrix = Matrix4::look_at(eye, center, up);
self.vp_matrix = self.proj_matrix * self.view_matrix;
}
/// Update the projection matrix.
pub fn perspective(&mut self, fovy: f32, aspect: f32, near: f32, far: f32) {
self.proj_matrix = cgmath::perspective(Deg::new(fovy), aspect, near, far);
self.vp_matrix = self.proj_matrix * self.view_matrix;
}
}
| vp_matrix | identifier_name |
camera.rs | extern crate cgmath;
use self::cgmath::{Point3, Vector3, Matrix4, SquareMatrix, Deg};
#[derive(Copy, Clone)]
/// Holds view and projection matrices.
///
/// See `Scene`.
pub struct Camera {
view_matrix: Matrix4<f32>,
proj_matrix: Matrix4<f32>,
// Reduces draw call computations
vp_matrix: Matrix4<f32>
}
impl Camera {
/// Create a new camera.
pub fn new() -> Camera {
return Camera {
view_matrix: Matrix4::identity(),
proj_matrix: Matrix4::identity(),
vp_matrix: Matrix4::identity()
};
} | /// Create a new `Camera` from view and projection matrices.
pub fn from_matrices(view_matrix: Matrix4<f32>, proj_matrix: Matrix4<f32>) -> Camera {
return Camera {
view_matrix: view_matrix,
proj_matrix: proj_matrix,
vp_matrix: proj_matrix * view_matrix
};
}
/// Get VP matrix.
pub fn vp_matrix(&self) -> Matrix4<f32> {
return self.vp_matrix;
}
/// Update the view matrix.
pub fn look_at(&mut self, eye: Point3<f32>, center: Point3<f32>, up: Vector3<f32>) {
self.view_matrix = Matrix4::look_at(eye, center, up);
self.vp_matrix = self.proj_matrix * self.view_matrix;
}
/// Update the projection matrix.
pub fn perspective(&mut self, fovy: f32, aspect: f32, near: f32, far: f32) {
self.proj_matrix = cgmath::perspective(Deg::new(fovy), aspect, near, far);
self.vp_matrix = self.proj_matrix * self.view_matrix;
}
} | random_line_split |
|
binop-mul-bool.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:`*` cannot be applied to type `bool`
fn main() { let x = true * false; }
// ^^^^ERR(<1.19.0) binary operation
// ^^^^NOTE(<1.19.0) an implementation of
// ^^^^^^^^^^^^ERR(>=1.19.0,<1.35.0-beta) binary operation
// ^^^^^^^^^^^^NOTE(>=1.19.0,<1.35.0-beta) an implementation of
// ^ERR(>=1.35.0-beta,<1.42.0-beta) binary operation
// ^ERR(>=1.42.0-beta) cannot multiply
// ^NOTE(>=1.35.0-beta,<1.43.0-beta) an implementation of
// ^^^^ERR(>=1.35.0-beta) bool
// ^^^^^ERR(>=1.35.0-beta) bool | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | random_line_split |
binop-mul-bool.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:`*` cannot be applied to type `bool`
fn | () { let x = true * false; }
// ^^^^ERR(<1.19.0) binary operation
// ^^^^NOTE(<1.19.0) an implementation of
// ^^^^^^^^^^^^ERR(>=1.19.0,<1.35.0-beta) binary operation
// ^^^^^^^^^^^^NOTE(>=1.19.0,<1.35.0-beta) an implementation of
// ^ERR(>=1.35.0-beta,<1.42.0-beta) binary operation
// ^ERR(>=1.42.0-beta) cannot multiply
// ^NOTE(>=1.35.0-beta,<1.43.0-beta) an implementation of
// ^^^^ERR(>=1.35.0-beta) bool
// ^^^^^ERR(>=1.35.0-beta) bool
| main | identifier_name |
binop-mul-bool.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:`*` cannot be applied to type `bool`
fn main() |
// ^^^^ERR(<1.19.0) binary operation
// ^^^^NOTE(<1.19.0) an implementation of
// ^^^^^^^^^^^^ERR(>=1.19.0,<1.35.0-beta) binary operation
// ^^^^^^^^^^^^NOTE(>=1.19.0,<1.35.0-beta) an implementation of
// ^ERR(>=1.35.0-beta,<1.42.0-beta) binary operation
// ^ERR(>=1.42.0-beta) cannot multiply
// ^NOTE(>=1.35.0-beta,<1.43.0-beta) an implementation of
// ^^^^ERR(>=1.35.0-beta) bool
// ^^^^^ERR(>=1.35.0-beta) bool
| { let x = true * false; } | identifier_body |
text.rs | use std::cmp;
use {
Align,
Backend,
CharacterCache,
Color,
Colorable,
Dimension,
FontSize,
LineBreak,
Range,
Rect,
Scalar,
Ui,
Widget,
};
use widget;
/// Displays some given text centred within a rectangular area.
///
/// By default, the rectangular dimensions are fit to the area occupied by the text.
///
/// If some horizontal dimension is given, the text will automatically wrap to the width and align
/// in accordance with the produced **Align**.
pub struct Text<'a> {
/// Data necessary and common for all widget builder types.
pub common: widget::CommonBuilder,
/// The text to be drawn by the **Text**.
pub text: &'a str,
/// Unique styling for the **Text**.
pub style: Style,
}
/// The unique kind for the widget.
pub const KIND: widget::Kind = "Text";
widget_style!{
KIND;
/// The styling for a **Text**'s graphics.
style Style {
/// The font size for the **Text**.
- font_size: FontSize { theme.font_size_medium }
/// The color of the **Text**.
- color: Color { theme.label_color }
/// Whether or not the text should wrap around the width.
- maybe_wrap: Option<Wrap> { Some(Wrap::Whitespace) }
/// The spacing between consecutive lines.
- line_spacing: Scalar { 1.0 }
/// Alignment of the text along the *x* axis.
- text_align: Align { Align::Start }
// /// The typeface with which the Text is rendered.
// - typeface: Path,
// /// The line styling for the text.
// - line: Option<Line> { None },
}
}
/// The way in which text should wrap around the width.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Wrap {
/// Wrap at the first character that exceeds the width.
Character,
/// Wrap at the first word that exceeds the width.
Whitespace,
}
// /// Line styling for the **Text**.
// pub enum Line {
// /// Underline the text.
// Under,
// /// Overline the text.
// Over,
// /// Strikethrough the text.
// Through,
// }
/// The state to be stored between updates for the **Text**.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// An owned version of the string.
string: String,
/// An index range for each line in the string.
line_breaks: Vec<(usize, Option<LineBreak>)>,
}
impl<'a> Text<'a> {
/// Build a new **Text** widget.
pub fn new(text: &'a str) -> Self {
Text {
common: widget::CommonBuilder::new(),
text: text,
style: Style::new(),
}
}
/// Build the **Text** with the given font size.
pub fn font_size(mut self, size: FontSize) -> Self {
self.style.font_size = Some(size);
self
}
/// Specify that the **Text** should not wrap lines around the width.
pub fn no_line_wrap(mut self) -> Self {
self.style.maybe_wrap = Some(None);
self
}
/// Line wrap the **Text** at the beginning of the first word that exceeds the width.
pub fn wrap_by_word(mut self) -> Self {
self.style.maybe_wrap = Some(Some(Wrap::Whitespace));
self
}
/// Line wrap the **Text** at the beginning of the first character that exceeds the width.
pub fn wrap_by_character(mut self) -> Self {
self.style.maybe_wrap = Some(Some(Wrap::Character));
self
}
/// Build the **Text** with the given **Style**.
pub fn with_style(mut self, style: Style) -> Self {
self.style = style;
self
}
/// Align the text to the left of its bounding **Rect**'s *x* axis range.
pub fn align_text_left(mut self) -> Self {
self.style.text_align = Some(Align::Start);
self
}
| }
/// Align the text to the right of its bounding **Rect**'s *x* axis range.
pub fn align_text_right(mut self) -> Self {
self.style.text_align = Some(Align::End);
self
}
/// The height of the space used between consecutive lines.
pub fn line_spacing(mut self, height: Scalar) -> Self {
self.style.line_spacing = Some(height);
self
}
}
impl<'a> Widget for Text<'a> {
type State = State;
type Style = Style;
fn common(&self) -> &widget::CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut widget::CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
string: String::new(),
line_breaks: Vec::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// If no specific width was given, we'll use the width of the widest line as a default.
fn default_x_dimension<B: Backend>(&self, ui: &Ui<B>) -> Dimension {
let font_size = self.style.font_size(&ui.theme);
let mut max_width = 0.0;
for line in self.text.lines() {
let width = ui.glyph_cache.width(font_size, line);
max_width = ::utils::partial_max(max_width, width);
}
Dimension::Absolute(max_width)
}
/// If no specific height was given, we'll use the total height of the text as a default.
fn default_y_dimension<B: Backend>(&self, ui: &Ui<B>) -> Dimension {
use position::Sizeable;
let text = &self.text;
let font_size = self.style.font_size(&ui.theme);
let num_lines = match self.style.maybe_wrap(&ui.theme) {
None => text.lines().count(),
Some(wrap) => match self.get_w(ui) {
None => text.lines().count(),
Some(max_w) => match wrap {
Wrap::Character =>
ui.glyph_cache.line_breaks_by_character(font_size, text, max_w).count(),
Wrap::Whitespace =>
ui.glyph_cache.line_breaks_by_whitespace(font_size, text, max_w).count(),
},
},
};
let line_spacing = self.style.line_spacing(&ui.theme);
let height = total_height(cmp::max(num_lines, 1), font_size, line_spacing);
Dimension::Absolute(height)
}
/// Update the state of the Text.
fn update<B: Backend>(self, args: widget::UpdateArgs<Self, B>) {
let widget::UpdateArgs { rect, state, style, ui, .. } = args;
let Text { text, .. } = self;
let maybe_wrap = style.maybe_wrap(ui.theme());
let font_size = style.font_size(ui.theme());
// Produces an iterator yielding the line breaks for the `text`.
let new_line_breaks = || match maybe_wrap {
None =>
// This branch could be faster if we just used `.lines()` somehow.
ui.glyph_cache().line_breaks_by_character(font_size, text, ::std::f64::MAX),
Some(Wrap::Character) =>
ui.glyph_cache().line_breaks_by_character(font_size, text, rect.w()),
Some(Wrap::Whitespace) =>
ui.glyph_cache().line_breaks_by_whitespace(font_size, text, rect.w()),
};
// If the string is different, we must update both the string and the line breaks.
if &state.view().string[..] != text {
state.update(|state| {
state.string = text.to_owned();
state.line_breaks = new_line_breaks().collect();
});
// Otherwise, we'll check to see if we have to update the line breaks.
} else {
use utils::write_if_different;
use std::borrow::Cow;
// Compare the line_breaks and only collect the new ones if they are different.
let maybe_new_line_breaks = {
let line_breaks = &state.view().line_breaks[..];
match write_if_different(line_breaks, new_line_breaks()) {
Cow::Owned(new) => Some(new),
_ => None,
}
};
if let Some(new_line_breaks) = maybe_new_line_breaks {
state.update(|state| state.line_breaks = new_line_breaks);
}
}
}
}
impl<'a> Colorable for Text<'a> {
fn color(mut self, color: Color) -> Self {
self.style.color = Some(color);
self
}
}
impl State {
/// Iterator that yields a new line at both "newline"s (i.e. `\n`) and `line_wrap_indices`.
pub fn lines(&self) -> Lines {
::glyph_cache::Lines::new(&self.string, self.line_breaks.iter().cloned())
}
// /// The total height from the top of the first line to the bottom of the last line.
// pub fn total_height(&self, font_size: FontSize, line_spacing: Scalar) -> Scalar {
// let num_lines = self.lines().count();
// total_height(num_lines, font_size, line_spacing)
// }
/// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
/// every **line** in the `string`
pub fn line_rects<'a>(&'a self,
container: Rect,
h_align: Align,
font_size: FontSize,
line_spacing: Scalar) -> LineRects<'a, Lines<'a>>
{
let lines = self.lines();
LineRects::new(lines, container, h_align, font_size, line_spacing)
}
// /// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
// /// every **character** in the `string`
// pub fn char_rects<'a, C>(&'a self,
// cache: &'a GlyphCache<C>,
// container: Rect,
// h_align: Align,
// font_size: FontSize,
// line_spacing: Scalar) -> CharRects<'a, C>
// where C: CharacterCache,
// {
// let lines = self.lines();
// let line_rects = self.line_rects(cache, container, h_align, font_size, line_spacing);
// let mut lines_with_rects = lines.zip(line_rects);
// let maybe_first_line = lines_with_rects.next().and_then(|(line, line_rect)| {
// char_widths_and_xs_for_line(cache, font_size, line, line_rect)
// .map(|char_widths_and_xs| (char_widths_and_xs, line_rect.y()))
// });
// let char_height = font_size as Scalar;
// CharRects {
// cache: cache,
// font_size: font_size,
// lines_with_rects: lines_with_rects,
// maybe_current_line: maybe_first_line,
// }
// }
}
/// Calculate the total height of the text from the given number of lines, font_size and
/// line_spacing.
pub fn total_height(num_lines: usize, font_size: FontSize, line_spacing: Scalar) -> Scalar {
font_size as Scalar * num_lines as Scalar + line_spacing * (num_lines - 1) as Scalar
}
/// Shorthand for the **Lines** iterator yielded by **State::lines**.
pub type Lines<'a> =
::glyph_cache::Lines<'a, ::std::iter::Cloned<::std::slice::Iter<'a, (usize, Option<LineBreak>)>>>;
/// A walker yielding a **Rect** (representing the absolute position and dimensions) for
/// every **line** in its given line iterator.
pub struct LineRects<'a, I: 'a> {
lines: I,
font_size: FontSize,
y_step: Scalar,
h_align: Align,
container_x: Range,
y: Scalar,
strs: ::std::marker::PhantomData<&'a ()>,
}
impl<'a, I> LineRects<'a, I> {
/// Construct a new **LineRects**.
pub fn new(lines: I,
container: Rect,
h_align: Align,
font_size: FontSize,
line_spacing: Scalar) -> LineRects<'a, I>
where I: Iterator<Item=&'a str>,
{
let height = font_size as Scalar;
LineRects {
lines: lines,
font_size: font_size,
y_step: -(line_spacing + height),
h_align: h_align,
container_x: container.x,
y: Range::new(0.0, height).align_end_of(container.y).middle(),
strs: ::std::marker::PhantomData,
}
}
// /// Returns the next line **Rect**.
// ///
// /// Returns **None** if there are no more lines in the **Lines** iter.
// ///
// /// This can be used similarly to an **Iterator**, i.e:
// ///
// /// `while let Some(rect) = line_rects.next() { ... }`
// ///
// /// The difference being that this method does not require borrowing the **CharacterCache**.
// pub fn next<C>(&mut self, cache: &mut C) -> Option<Rect>
// where I: Iterator<Item=&'a str>,
// C: CharacterCache,
// {
// self.next_with_line(cache).map(|(rect, _)| rect)
// }
/// The same as **LineRects::next** but also yields the
/// line's `&'a str` alongside the **Rect**.
pub fn next_with_line<C>(&mut self, cache: &mut C) -> Option<(Rect, &'a str)>
where I: Iterator<Item=&'a str>,
C: CharacterCache,
{
let LineRects { ref mut lines, font_size, y_step, h_align, container_x, ref mut y, .. } = *self;
lines.next().map(|line| {
let w = cache.width(font_size, line);
let h = font_size as Scalar;
let w_range = Range::new(0.0, w);
let x = match h_align {
Align::Start => w_range.align_start_of(container_x),
Align::Middle => w_range.align_middle_of(container_x),
Align::End => w_range.align_end_of(container_x),
}.middle();
let xy = [x, *y];
let wh = [w, h];
let rect = Rect::from_xy_dim(xy, wh);
*y += y_step;
(rect, line)
})
}
}
// /// Shorthand for the `CharWidths` iterator used within the `CharRects` iterator.
// pub type CharWidths<'a, C> = ::glyph_cache::CharWidths<'a, C, ::std::str::Chars<'a>>;
//
// /// Shorthand for the `CharXs` iterator used within the `CharRects` iterator.
// pub type CharXs<'a, C> = ::glyph_cache::CharXs<'a, C, ::std::str::Chars<'a>>;
//
// /// Shorthand for the `Zip`ped `CharWidths` and `CharXs` iterators used within the `CharRects`
// /// iterator.
// pub type CharWidthsAndXs<'a, C> = ::std::iter::Zip<CharWidths<'a, C>, CharXs<'a, C>>;
//
// /// Shorthand for the `Zip`ped `Lines` and `LineRects` iterators used within the `CharRects`
// /// iterator.
// pub type LinesWithRects<'a, C> = ::std::iter::Zip<Lines<'a>, LineRects<'a, C>>;
//
// type Y = Scalar;
// type CurrentLine<'a, C> = (CharWidthsAndXs<'a, C>, Y);
// /// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
// /// every **character** in the `string`
// pub struct CharRects<'a, C: 'a> {
// font_size: FontSize,
// cache: &'a GlyphCache<C>,
// lines_with_rects: LinesWithRects<'a, C>,
// maybe_current_line: Option<CurrentLine<'a, C>>,
// }
//
//
// impl<'a, C> Iterator for CharRects<'a, C>
// where C: CharacterCache,
// {
// type Item = Rect;
// fn next(&mut self) -> Option<Self::Item> {
// let CharRects {
// font_size,
// cache,
// ref mut lines_with_rects,
// ref mut maybe_current_line,
// } = *self;
//
// // Continue trying each line until we find one with characters.
// loop {
// match *maybe_current_line {
// // If the current line has some characters, return the next `Rect`.
// Some((ref mut char_widths_and_xs, y)) => match char_widths_and_xs.next() {
// Some((w, x)) => {
// let xy = [x, y];
// let dim = [w, font_size as Scalar];
// return Some(Rect::from_xy_dim(xy, dim));
// },
// None => (),
// },
// // If we have no more lines, we're done.
// None => return None,
// }
//
// // If our line had no more characters, make the next line the current line.
// *maybe_current_line = lines_with_rects.next().and_then(|(line, line_rect)| {
// char_widths_and_xs_for_line(cache, font_size, line, line_rect)
// .map(|char_widths_and_xs| (char_widths_and_xs, line_rect.y()))
// });
// }
// }
// }
//
//
// /// Returns a `CharWidthsAndXs` iterator for the given `line`.
// ///
// /// Returns `None` if there are no characters within the `line`.
// fn char_widths_and_xs_for_line<'a, C>(cache: &'a GlyphCache<C>,
// font_size: FontSize,
// line: &'a str,
// line_rect: Rect) -> Option<CharWidthsAndXs<'a, C>>
// where C: CharacterCache,
// {
// line.chars().next().map(|ch| {
// let ch_w = cache.char_width(font_size, ch);
// let ch_w_range = Range::new(0.0..ch_w);
// let start_x = ch_w_range.align_start_of(line_rect.x).middle();
// let char_widths = cache.char_widths(font_size, line.chars());
// let char_xs = cache.char_xs(font_size, start_x, line.chars());
// char_widths.zip(char_xs)
// })
// } | /// Align the text to the middle of its bounding **Rect**'s *x* axis range.
pub fn align_text_middle(mut self) -> Self {
self.style.text_align = Some(Align::Middle);
self | random_line_split |
text.rs | use std::cmp;
use {
Align,
Backend,
CharacterCache,
Color,
Colorable,
Dimension,
FontSize,
LineBreak,
Range,
Rect,
Scalar,
Ui,
Widget,
};
use widget;
/// Displays some given text centred within a rectangular area.
///
/// By default, the rectangular dimensions are fit to the area occupied by the text.
///
/// If some horizontal dimension is given, the text will automatically wrap to the width and align
/// in accordance with the produced **Align**.
pub struct Text<'a> {
/// Data necessary and common for all widget builder types.
pub common: widget::CommonBuilder,
/// The text to be drawn by the **Text**.
pub text: &'a str,
/// Unique styling for the **Text**.
pub style: Style,
}
/// The unique kind for the widget.
pub const KIND: widget::Kind = "Text";
widget_style!{
KIND;
/// The styling for a **Text**'s graphics.
style Style {
/// The font size for the **Text**.
- font_size: FontSize { theme.font_size_medium }
/// The color of the **Text**.
- color: Color { theme.label_color }
/// Whether or not the text should wrap around the width.
- maybe_wrap: Option<Wrap> { Some(Wrap::Whitespace) }
/// The spacing between consecutive lines.
- line_spacing: Scalar { 1.0 }
/// Alignment of the text along the *x* axis.
- text_align: Align { Align::Start }
// /// The typeface with which the Text is rendered.
// - typeface: Path,
// /// The line styling for the text.
// - line: Option<Line> { None },
}
}
/// The way in which text should wrap around the width.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Wrap {
/// Wrap at the first character that exceeds the width.
Character,
/// Wrap at the first word that exceeds the width.
Whitespace,
}
// /// Line styling for the **Text**.
// pub enum Line {
// /// Underline the text.
// Under,
// /// Overline the text.
// Over,
// /// Strikethrough the text.
// Through,
// }
/// The state to be stored between updates for the **Text**.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// An owned version of the string.
string: String,
/// An index range for each line in the string.
line_breaks: Vec<(usize, Option<LineBreak>)>,
}
impl<'a> Text<'a> {
/// Build a new **Text** widget.
pub fn new(text: &'a str) -> Self {
Text {
common: widget::CommonBuilder::new(),
text: text,
style: Style::new(),
}
}
/// Build the **Text** with the given font size.
pub fn font_size(mut self, size: FontSize) -> Self {
self.style.font_size = Some(size);
self
}
/// Specify that the **Text** should not wrap lines around the width.
pub fn no_line_wrap(mut self) -> Self {
self.style.maybe_wrap = Some(None);
self
}
/// Line wrap the **Text** at the beginning of the first word that exceeds the width.
pub fn wrap_by_word(mut self) -> Self {
self.style.maybe_wrap = Some(Some(Wrap::Whitespace));
self
}
/// Line wrap the **Text** at the beginning of the first character that exceeds the width.
pub fn wrap_by_character(mut self) -> Self {
self.style.maybe_wrap = Some(Some(Wrap::Character));
self
}
/// Build the **Text** with the given **Style**.
pub fn with_style(mut self, style: Style) -> Self {
self.style = style;
self
}
/// Align the text to the left of its bounding **Rect**'s *x* axis range.
pub fn align_text_left(mut self) -> Self {
self.style.text_align = Some(Align::Start);
self
}
/// Align the text to the middle of its bounding **Rect**'s *x* axis range.
pub fn align_text_middle(mut self) -> Self {
self.style.text_align = Some(Align::Middle);
self
}
/// Align the text to the right of its bounding **Rect**'s *x* axis range.
pub fn align_text_right(mut self) -> Self {
self.style.text_align = Some(Align::End);
self
}
/// The height of the space used between consecutive lines.
pub fn line_spacing(mut self, height: Scalar) -> Self {
self.style.line_spacing = Some(height);
self
}
}
impl<'a> Widget for Text<'a> {
type State = State;
type Style = Style;
fn common(&self) -> &widget::CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut widget::CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
string: String::new(),
line_breaks: Vec::new(),
}
}
fn | (&self) -> Style {
self.style.clone()
}
/// If no specific width was given, we'll use the width of the widest line as a default.
fn default_x_dimension<B: Backend>(&self, ui: &Ui<B>) -> Dimension {
let font_size = self.style.font_size(&ui.theme);
let mut max_width = 0.0;
for line in self.text.lines() {
let width = ui.glyph_cache.width(font_size, line);
max_width = ::utils::partial_max(max_width, width);
}
Dimension::Absolute(max_width)
}
/// If no specific height was given, we'll use the total height of the text as a default.
fn default_y_dimension<B: Backend>(&self, ui: &Ui<B>) -> Dimension {
use position::Sizeable;
let text = &self.text;
let font_size = self.style.font_size(&ui.theme);
let num_lines = match self.style.maybe_wrap(&ui.theme) {
None => text.lines().count(),
Some(wrap) => match self.get_w(ui) {
None => text.lines().count(),
Some(max_w) => match wrap {
Wrap::Character =>
ui.glyph_cache.line_breaks_by_character(font_size, text, max_w).count(),
Wrap::Whitespace =>
ui.glyph_cache.line_breaks_by_whitespace(font_size, text, max_w).count(),
},
},
};
let line_spacing = self.style.line_spacing(&ui.theme);
let height = total_height(cmp::max(num_lines, 1), font_size, line_spacing);
Dimension::Absolute(height)
}
/// Update the state of the Text.
fn update<B: Backend>(self, args: widget::UpdateArgs<Self, B>) {
let widget::UpdateArgs { rect, state, style, ui, .. } = args;
let Text { text, .. } = self;
let maybe_wrap = style.maybe_wrap(ui.theme());
let font_size = style.font_size(ui.theme());
// Produces an iterator yielding the line breaks for the `text`.
let new_line_breaks = || match maybe_wrap {
None =>
// This branch could be faster if we just used `.lines()` somehow.
ui.glyph_cache().line_breaks_by_character(font_size, text, ::std::f64::MAX),
Some(Wrap::Character) =>
ui.glyph_cache().line_breaks_by_character(font_size, text, rect.w()),
Some(Wrap::Whitespace) =>
ui.glyph_cache().line_breaks_by_whitespace(font_size, text, rect.w()),
};
// If the string is different, we must update both the string and the line breaks.
if &state.view().string[..] != text {
state.update(|state| {
state.string = text.to_owned();
state.line_breaks = new_line_breaks().collect();
});
// Otherwise, we'll check to see if we have to update the line breaks.
} else {
use utils::write_if_different;
use std::borrow::Cow;
// Compare the line_breaks and only collect the new ones if they are different.
let maybe_new_line_breaks = {
let line_breaks = &state.view().line_breaks[..];
match write_if_different(line_breaks, new_line_breaks()) {
Cow::Owned(new) => Some(new),
_ => None,
}
};
if let Some(new_line_breaks) = maybe_new_line_breaks {
state.update(|state| state.line_breaks = new_line_breaks);
}
}
}
}
impl<'a> Colorable for Text<'a> {
fn color(mut self, color: Color) -> Self {
self.style.color = Some(color);
self
}
}
impl State {
/// Iterator that yields a new line at both "newline"s (i.e. `\n`) and `line_wrap_indices`.
pub fn lines(&self) -> Lines {
::glyph_cache::Lines::new(&self.string, self.line_breaks.iter().cloned())
}
// /// The total height from the top of the first line to the bottom of the last line.
// pub fn total_height(&self, font_size: FontSize, line_spacing: Scalar) -> Scalar {
// let num_lines = self.lines().count();
// total_height(num_lines, font_size, line_spacing)
// }
/// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
/// every **line** in the `string`
pub fn line_rects<'a>(&'a self,
container: Rect,
h_align: Align,
font_size: FontSize,
line_spacing: Scalar) -> LineRects<'a, Lines<'a>>
{
let lines = self.lines();
LineRects::new(lines, container, h_align, font_size, line_spacing)
}
// /// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
// /// every **character** in the `string`
// pub fn char_rects<'a, C>(&'a self,
// cache: &'a GlyphCache<C>,
// container: Rect,
// h_align: Align,
// font_size: FontSize,
// line_spacing: Scalar) -> CharRects<'a, C>
// where C: CharacterCache,
// {
// let lines = self.lines();
// let line_rects = self.line_rects(cache, container, h_align, font_size, line_spacing);
// let mut lines_with_rects = lines.zip(line_rects);
// let maybe_first_line = lines_with_rects.next().and_then(|(line, line_rect)| {
// char_widths_and_xs_for_line(cache, font_size, line, line_rect)
// .map(|char_widths_and_xs| (char_widths_and_xs, line_rect.y()))
// });
// let char_height = font_size as Scalar;
// CharRects {
// cache: cache,
// font_size: font_size,
// lines_with_rects: lines_with_rects,
// maybe_current_line: maybe_first_line,
// }
// }
}
/// Calculate the total height of the text from the given number of lines, font_size and
/// line_spacing.
pub fn total_height(num_lines: usize, font_size: FontSize, line_spacing: Scalar) -> Scalar {
font_size as Scalar * num_lines as Scalar + line_spacing * (num_lines - 1) as Scalar
}
/// Shorthand for the **Lines** iterator yielded by **State::lines**.
pub type Lines<'a> =
::glyph_cache::Lines<'a, ::std::iter::Cloned<::std::slice::Iter<'a, (usize, Option<LineBreak>)>>>;
/// A walker yielding a **Rect** (representing the absolute position and dimensions) for
/// every **line** in its given line iterator.
pub struct LineRects<'a, I: 'a> {
lines: I,
font_size: FontSize,
y_step: Scalar,
h_align: Align,
container_x: Range,
y: Scalar,
strs: ::std::marker::PhantomData<&'a ()>,
}
impl<'a, I> LineRects<'a, I> {
/// Construct a new **LineRects**.
pub fn new(lines: I,
container: Rect,
h_align: Align,
font_size: FontSize,
line_spacing: Scalar) -> LineRects<'a, I>
where I: Iterator<Item=&'a str>,
{
let height = font_size as Scalar;
LineRects {
lines: lines,
font_size: font_size,
y_step: -(line_spacing + height),
h_align: h_align,
container_x: container.x,
y: Range::new(0.0, height).align_end_of(container.y).middle(),
strs: ::std::marker::PhantomData,
}
}
// /// Returns the next line **Rect**.
// ///
// /// Returns **None** if there are no more lines in the **Lines** iter.
// ///
// /// This can be used similarly to an **Iterator**, i.e:
// ///
// /// `while let Some(rect) = line_rects.next() { ... }`
// ///
// /// The difference being that this method does not require borrowing the **CharacterCache**.
// pub fn next<C>(&mut self, cache: &mut C) -> Option<Rect>
// where I: Iterator<Item=&'a str>,
// C: CharacterCache,
// {
// self.next_with_line(cache).map(|(rect, _)| rect)
// }
/// The same as **LineRects::next** but also yields the
/// line's `&'a str` alongside the **Rect**.
pub fn next_with_line<C>(&mut self, cache: &mut C) -> Option<(Rect, &'a str)>
where I: Iterator<Item=&'a str>,
C: CharacterCache,
{
let LineRects { ref mut lines, font_size, y_step, h_align, container_x, ref mut y, .. } = *self;
lines.next().map(|line| {
let w = cache.width(font_size, line);
let h = font_size as Scalar;
let w_range = Range::new(0.0, w);
let x = match h_align {
Align::Start => w_range.align_start_of(container_x),
Align::Middle => w_range.align_middle_of(container_x),
Align::End => w_range.align_end_of(container_x),
}.middle();
let xy = [x, *y];
let wh = [w, h];
let rect = Rect::from_xy_dim(xy, wh);
*y += y_step;
(rect, line)
})
}
}
// /// Shorthand for the `CharWidths` iterator used within the `CharRects` iterator.
// pub type CharWidths<'a, C> = ::glyph_cache::CharWidths<'a, C, ::std::str::Chars<'a>>;
//
// /// Shorthand for the `CharXs` iterator used within the `CharRects` iterator.
// pub type CharXs<'a, C> = ::glyph_cache::CharXs<'a, C, ::std::str::Chars<'a>>;
//
// /// Shorthand for the `Zip`ped `CharWidths` and `CharXs` iterators used within the `CharRects`
// /// iterator.
// pub type CharWidthsAndXs<'a, C> = ::std::iter::Zip<CharWidths<'a, C>, CharXs<'a, C>>;
//
// /// Shorthand for the `Zip`ped `Lines` and `LineRects` iterators used within the `CharRects`
// /// iterator.
// pub type LinesWithRects<'a, C> = ::std::iter::Zip<Lines<'a>, LineRects<'a, C>>;
//
// type Y = Scalar;
// type CurrentLine<'a, C> = (CharWidthsAndXs<'a, C>, Y);
// /// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
// /// every **character** in the `string`
// pub struct CharRects<'a, C: 'a> {
// font_size: FontSize,
// cache: &'a GlyphCache<C>,
// lines_with_rects: LinesWithRects<'a, C>,
// maybe_current_line: Option<CurrentLine<'a, C>>,
// }
//
//
// impl<'a, C> Iterator for CharRects<'a, C>
// where C: CharacterCache,
// {
// type Item = Rect;
// fn next(&mut self) -> Option<Self::Item> {
// let CharRects {
// font_size,
// cache,
// ref mut lines_with_rects,
// ref mut maybe_current_line,
// } = *self;
//
// // Continue trying each line until we find one with characters.
// loop {
// match *maybe_current_line {
// // If the current line has some characters, return the next `Rect`.
// Some((ref mut char_widths_and_xs, y)) => match char_widths_and_xs.next() {
// Some((w, x)) => {
// let xy = [x, y];
// let dim = [w, font_size as Scalar];
// return Some(Rect::from_xy_dim(xy, dim));
// },
// None => (),
// },
// // If we have no more lines, we're done.
// None => return None,
// }
//
// // If our line had no more characters, make the next line the current line.
// *maybe_current_line = lines_with_rects.next().and_then(|(line, line_rect)| {
// char_widths_and_xs_for_line(cache, font_size, line, line_rect)
// .map(|char_widths_and_xs| (char_widths_and_xs, line_rect.y()))
// });
// }
// }
// }
//
//
// /// Returns a `CharWidthsAndXs` iterator for the given `line`.
// ///
// /// Returns `None` if there are no characters within the `line`.
// fn char_widths_and_xs_for_line<'a, C>(cache: &'a GlyphCache<C>,
// font_size: FontSize,
// line: &'a str,
// line_rect: Rect) -> Option<CharWidthsAndXs<'a, C>>
// where C: CharacterCache,
// {
// line.chars().next().map(|ch| {
// let ch_w = cache.char_width(font_size, ch);
// let ch_w_range = Range::new(0.0..ch_w);
// let start_x = ch_w_range.align_start_of(line_rect.x).middle();
// let char_widths = cache.char_widths(font_size, line.chars());
// let char_xs = cache.char_xs(font_size, start_x, line.chars());
// char_widths.zip(char_xs)
// })
// }
| style | identifier_name |
text.rs | use std::cmp;
use {
Align,
Backend,
CharacterCache,
Color,
Colorable,
Dimension,
FontSize,
LineBreak,
Range,
Rect,
Scalar,
Ui,
Widget,
};
use widget;
/// Displays some given text centred within a rectangular area.
///
/// By default, the rectangular dimensions are fit to the area occupied by the text.
///
/// If some horizontal dimension is given, the text will automatically wrap to the width and align
/// in accordance with the produced **Align**.
pub struct Text<'a> {
/// Data necessary and common for all widget builder types.
pub common: widget::CommonBuilder,
/// The text to be drawn by the **Text**.
pub text: &'a str,
/// Unique styling for the **Text**.
pub style: Style,
}
/// The unique kind for the widget.
pub const KIND: widget::Kind = "Text";
widget_style!{
KIND;
/// The styling for a **Text**'s graphics.
style Style {
/// The font size for the **Text**.
- font_size: FontSize { theme.font_size_medium }
/// The color of the **Text**.
- color: Color { theme.label_color }
/// Whether or not the text should wrap around the width.
- maybe_wrap: Option<Wrap> { Some(Wrap::Whitespace) }
/// The spacing between consecutive lines.
- line_spacing: Scalar { 1.0 }
/// Alignment of the text along the *x* axis.
- text_align: Align { Align::Start }
// /// The typeface with which the Text is rendered.
// - typeface: Path,
// /// The line styling for the text.
// - line: Option<Line> { None },
}
}
/// The way in which text should wrap around the width.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Wrap {
/// Wrap at the first character that exceeds the width.
Character,
/// Wrap at the first word that exceeds the width.
Whitespace,
}
// /// Line styling for the **Text**.
// pub enum Line {
// /// Underline the text.
// Under,
// /// Overline the text.
// Over,
// /// Strikethrough the text.
// Through,
// }
/// The state to be stored between updates for the **Text**.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// An owned version of the string.
string: String,
/// An index range for each line in the string.
line_breaks: Vec<(usize, Option<LineBreak>)>,
}
impl<'a> Text<'a> {
/// Build a new **Text** widget.
pub fn new(text: &'a str) -> Self {
Text {
common: widget::CommonBuilder::new(),
text: text,
style: Style::new(),
}
}
/// Build the **Text** with the given font size.
pub fn font_size(mut self, size: FontSize) -> Self |
/// Specify that the **Text** should not wrap lines around the width.
pub fn no_line_wrap(mut self) -> Self {
self.style.maybe_wrap = Some(None);
self
}
/// Line wrap the **Text** at the beginning of the first word that exceeds the width.
pub fn wrap_by_word(mut self) -> Self {
self.style.maybe_wrap = Some(Some(Wrap::Whitespace));
self
}
/// Line wrap the **Text** at the beginning of the first character that exceeds the width.
pub fn wrap_by_character(mut self) -> Self {
self.style.maybe_wrap = Some(Some(Wrap::Character));
self
}
/// Build the **Text** with the given **Style**.
pub fn with_style(mut self, style: Style) -> Self {
self.style = style;
self
}
/// Align the text to the left of its bounding **Rect**'s *x* axis range.
pub fn align_text_left(mut self) -> Self {
self.style.text_align = Some(Align::Start);
self
}
/// Align the text to the middle of its bounding **Rect**'s *x* axis range.
pub fn align_text_middle(mut self) -> Self {
self.style.text_align = Some(Align::Middle);
self
}
/// Align the text to the right of its bounding **Rect**'s *x* axis range.
pub fn align_text_right(mut self) -> Self {
self.style.text_align = Some(Align::End);
self
}
/// The height of the space used between consecutive lines.
pub fn line_spacing(mut self, height: Scalar) -> Self {
self.style.line_spacing = Some(height);
self
}
}
impl<'a> Widget for Text<'a> {
type State = State;
type Style = Style;
fn common(&self) -> &widget::CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut widget::CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
string: String::new(),
line_breaks: Vec::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// If no specific width was given, we'll use the width of the widest line as a default.
fn default_x_dimension<B: Backend>(&self, ui: &Ui<B>) -> Dimension {
let font_size = self.style.font_size(&ui.theme);
let mut max_width = 0.0;
for line in self.text.lines() {
let width = ui.glyph_cache.width(font_size, line);
max_width = ::utils::partial_max(max_width, width);
}
Dimension::Absolute(max_width)
}
/// If no specific height was given, we'll use the total height of the text as a default.
fn default_y_dimension<B: Backend>(&self, ui: &Ui<B>) -> Dimension {
use position::Sizeable;
let text = &self.text;
let font_size = self.style.font_size(&ui.theme);
let num_lines = match self.style.maybe_wrap(&ui.theme) {
None => text.lines().count(),
Some(wrap) => match self.get_w(ui) {
None => text.lines().count(),
Some(max_w) => match wrap {
Wrap::Character =>
ui.glyph_cache.line_breaks_by_character(font_size, text, max_w).count(),
Wrap::Whitespace =>
ui.glyph_cache.line_breaks_by_whitespace(font_size, text, max_w).count(),
},
},
};
let line_spacing = self.style.line_spacing(&ui.theme);
let height = total_height(cmp::max(num_lines, 1), font_size, line_spacing);
Dimension::Absolute(height)
}
/// Update the state of the Text.
fn update<B: Backend>(self, args: widget::UpdateArgs<Self, B>) {
let widget::UpdateArgs { rect, state, style, ui, .. } = args;
let Text { text, .. } = self;
let maybe_wrap = style.maybe_wrap(ui.theme());
let font_size = style.font_size(ui.theme());
// Produces an iterator yielding the line breaks for the `text`.
let new_line_breaks = || match maybe_wrap {
None =>
// This branch could be faster if we just used `.lines()` somehow.
ui.glyph_cache().line_breaks_by_character(font_size, text, ::std::f64::MAX),
Some(Wrap::Character) =>
ui.glyph_cache().line_breaks_by_character(font_size, text, rect.w()),
Some(Wrap::Whitespace) =>
ui.glyph_cache().line_breaks_by_whitespace(font_size, text, rect.w()),
};
// If the string is different, we must update both the string and the line breaks.
if &state.view().string[..] != text {
state.update(|state| {
state.string = text.to_owned();
state.line_breaks = new_line_breaks().collect();
});
// Otherwise, we'll check to see if we have to update the line breaks.
} else {
use utils::write_if_different;
use std::borrow::Cow;
// Compare the line_breaks and only collect the new ones if they are different.
let maybe_new_line_breaks = {
let line_breaks = &state.view().line_breaks[..];
match write_if_different(line_breaks, new_line_breaks()) {
Cow::Owned(new) => Some(new),
_ => None,
}
};
if let Some(new_line_breaks) = maybe_new_line_breaks {
state.update(|state| state.line_breaks = new_line_breaks);
}
}
}
}
impl<'a> Colorable for Text<'a> {
fn color(mut self, color: Color) -> Self {
self.style.color = Some(color);
self
}
}
impl State {
/// Iterator that yields a new line at both "newline"s (i.e. `\n`) and `line_wrap_indices`.
pub fn lines(&self) -> Lines {
::glyph_cache::Lines::new(&self.string, self.line_breaks.iter().cloned())
}
// /// The total height from the top of the first line to the bottom of the last line.
// pub fn total_height(&self, font_size: FontSize, line_spacing: Scalar) -> Scalar {
// let num_lines = self.lines().count();
// total_height(num_lines, font_size, line_spacing)
// }
/// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
/// every **line** in the `string`
pub fn line_rects<'a>(&'a self,
container: Rect,
h_align: Align,
font_size: FontSize,
line_spacing: Scalar) -> LineRects<'a, Lines<'a>>
{
let lines = self.lines();
LineRects::new(lines, container, h_align, font_size, line_spacing)
}
// /// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
// /// every **character** in the `string`
// pub fn char_rects<'a, C>(&'a self,
// cache: &'a GlyphCache<C>,
// container: Rect,
// h_align: Align,
// font_size: FontSize,
// line_spacing: Scalar) -> CharRects<'a, C>
// where C: CharacterCache,
// {
// let lines = self.lines();
// let line_rects = self.line_rects(cache, container, h_align, font_size, line_spacing);
// let mut lines_with_rects = lines.zip(line_rects);
// let maybe_first_line = lines_with_rects.next().and_then(|(line, line_rect)| {
// char_widths_and_xs_for_line(cache, font_size, line, line_rect)
// .map(|char_widths_and_xs| (char_widths_and_xs, line_rect.y()))
// });
// let char_height = font_size as Scalar;
// CharRects {
// cache: cache,
// font_size: font_size,
// lines_with_rects: lines_with_rects,
// maybe_current_line: maybe_first_line,
// }
// }
}
/// Calculate the total height of the text from the given number of lines, font_size and
/// line_spacing.
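/// For example, 3 lines at `font_size` 12 with `line_spacing` 1.0 gives
/// `12.0 * 3.0 + 1.0 * 2.0 = 38.0`.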
pub fn total_height(num_lines: usize, font_size: FontSize, line_spacing: Scalar) -> Scalar {
font_size as Scalar * num_lines as Scalar + line_spacing * (num_lines - 1) as Scalar
}
/// Shorthand for the **Lines** iterator yielded by **State::lines**.
pub type Lines<'a> =
::glyph_cache::Lines<'a, ::std::iter::Cloned<::std::slice::Iter<'a, (usize, Option<LineBreak>)>>>;
/// An walker yielding a **Rect** (representing the absolute position and dimensions) for
/// every **line** in its given line iterator.
pub struct LineRects<'a, I: 'a> {
lines: I,
font_size: FontSize,
y_step: Scalar,
h_align: Align,
container_x: Range,
y: Scalar,
strs: ::std::marker::PhantomData<&'a ()>,
}
impl<'a, I> LineRects<'a, I> {
/// Construct a new **LineRects**.
pub fn new(lines: I,
container: Rect,
h_align: Align,
font_size: FontSize,
line_spacing: Scalar) -> LineRects<'a, I>
where I: Iterator<Item=&'a str>,
{
let height = font_size as Scalar;
LineRects {
lines: lines,
font_size: font_size,
y_step: -(line_spacing + height),
h_align: h_align,
container_x: container.x,
y: Range::new(0.0, height).align_end_of(container.y).middle(),
strs: ::std::marker::PhantomData,
}
}
// /// Returns the next line **Rect**.
// ///
// /// Returns **None** if there are no more lines in the **Lines** iter.
// ///
// /// This can be used similarly to an **Iterator**, i.e:
// ///
// /// `while let Some(rect) = line_rects.next() { ... }`
// ///
// /// The difference being that this method does not require borrowing the **CharacterCache**.
// pub fn next<C>(&mut self, cache: &mut C) -> Option<Rect>
// where I: Iterator<Item=&'a str>,
// C: CharacterCache,
// {
// self.next_with_line(cache).map(|(rect, _)| rect)
// }
/// The same as [**LineRects::next**](./[email protected]) but also yields the
/// line's `&'a str` alongside the **Rect**.
pub fn next_with_line<C>(&mut self, cache: &mut C) -> Option<(Rect, &'a str)>
where I: Iterator<Item=&'a str>,
C: CharacterCache,
{
let LineRects { ref mut lines, font_size, y_step, h_align, container_x, ref mut y, .. } = *self;
lines.next().map(|line| {
let w = cache.width(font_size, line);
let h = font_size as Scalar;
let w_range = Range::new(0.0, w);
let x = match h_align {
Align::Start => w_range.align_start_of(container_x),
Align::Middle => w_range.align_middle_of(container_x),
Align::End => w_range.align_end_of(container_x),
}.middle();
let xy = [x, *y];
let wh = [w, h];
let rect = Rect::from_xy_dim(xy, wh);
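// Step down to the next line: `y_step` is negative (line height plus spacing).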
*y += y_step;
(rect, line)
})
}
}
// /// Shorthand for the `CharWidths` iterator used within the `CharRects` iterator.
// pub type CharWidths<'a, C> = ::glyph_cache::CharWidths<'a, C, ::std::str::Chars<'a>>;
//
// /// Shorthand for the `CharXs` iterator used within the `CharRects` iterator.
// pub type CharXs<'a, C> = ::glyph_cache::CharXs<'a, C, ::std::str::Chars<'a>>;
//
// /// Shorthand for the `Zip`ped `CharWidths` and `CharXs` iterators used within the `CharRects`
// /// iterator.
// pub type CharWidthsAndXs<'a, C> = ::std::iter::Zip<CharWidths<'a, C>, CharXs<'a, C>>;
//
// /// Shorthand for the `Zip`ped `Lines` and `LineRects` iterators used within the `CharRects`
// /// iterator.
// pub type LinesWithRects<'a, C> = ::std::iter::Zip<Lines<'a>, LineRects<'a, C>>;
//
// type Y = Scalar;
// type CurrentLine<'a, C> = (CharWidthsAndXs<'a, C>, Y);
// /// An iterator yielding a **Rect** (representing the absolute position and dimensions) for
// /// every **character** in the `string`
// pub struct CharRects<'a, C: 'a> {
// font_size: FontSize,
// cache: &'a GlyphCache<C>,
// lines_with_rects: LinesWithRects<'a, C>,
// maybe_current_line: Option<CurrentLine<'a, C>>,
// }
//
//
// impl<'a, C> Iterator for CharRects<'a, C>
// where C: CharacterCache,
// {
// type Item = Rect;
// fn next(&mut self) -> Option<Self::Item> {
// let CharRects {
// font_size,
// cache,
// ref mut lines_with_rects,
// ref mut maybe_current_line,
// } = *self;
//
// // Continue trying each line until we find one with characters.
// loop {
// match *maybe_current_line {
// // If the current line has some characters, return the next `Rect`.
// Some((ref mut char_widths_and_xs, y)) => match char_widths_and_xs.next() {
// Some((w, x)) => {
// let xy = [x, y];
// let dim = [w, font_size as Scalar];
// return Some(Rect::from_xy_dim(xy, dim));
// },
// None => (),
// },
// // If we have no more lines, we're done.
// None => return None,
// }
//
// // If our line had no more characters, make the next line the current line.
// *maybe_current_line = lines_with_rects.next().and_then(|(line, line_rect)| {
// char_widths_and_xs_for_line(cache, font_size, line, line_rect)
// .map(|char_widths_and_xs| (char_widths_and_xs, line_rect.y()))
// });
// }
// }
// }
//
//
// /// Returns a `CharWidthsAndXs` iterator for the given `line`.
// ///
// /// Returns `None` if there are no characters within the `line`.
// fn char_widths_and_xs_for_line<'a, C>(cache: &'a GlyphCache<C>,
// font_size: FontSize,
// line: &'a str,
// line_rect: Rect) -> Option<CharWidthsAndXs<'a, C>>
// where C: CharacterCache,
// {
// line.chars().next().map(|ch| {
// let ch_w = cache.char_width(font_size, ch);
// let ch_w_range = Range::new(0.0..ch_w);
// let start_x = ch_w_range.align_start_of(line_rect.x).middle();
// let char_widths = cache.char_widths(font_size, line.chars());
// let char_xs = cache.char_xs(font_size, start_x, line.chars());
// char_widths.zip(char_xs)
// })
// }
| {
self.style.font_size = Some(size);
self
} | identifier_body |
sprite.rs | #[derive(Copy, Clone, Eq, PartialEq)]
pub enum Color {
Navy, Green, Teal, Maroon,
Purple, Brown, Gray, Dark,
Blue, Lime, Aqua, Red,
Pink, Yellow, White,
}
use self::Color::*;
pub const NAVY: &'static [Color] = &[Navy];
pub const GREEN: &'static [Color] = &[Green];
pub const TEAL: &'static [Color] = &[Teal];
pub const MAROON: &'static [Color] = &[Maroon];
pub const PURPLE: &'static [Color] = &[Purple];
pub const BROWN: &'static [Color] = &[Brown];
pub const GRAY: &'static [Color] = &[Gray];
pub const DARK: &'static [Color] = &[Dark];
pub const BLUE: &'static [Color] = &[Blue];
pub const LIME: &'static [Color] = &[Lime];
pub const AQUA: &'static [Color] = &[Aqua];
pub const RED: &'static [Color] = &[Red];
pub const PINK: &'static [Color] = &[Pink];
pub const YELLOW: &'static [Color] = &[Yellow];
pub const WHITE: &'static [Color] = &[White];
pub const GLITCH: &'static [Color] = &[Yellow, Pink];
pub const GOLD: &'static [Color] = &[Red, Yellow, Brown, White];
pub const SAPPHIRE: &'static [Color] = &[Blue, Navy, Aqua, Teal];
#[derive(Copy, Clone)]
pub struct Sprite {
pub character: char,
pub color: &'static [Color],
}
pub const HIDDEN: Sprite = Sprite {
character: ' ',
color: DARK
};
impl Sprite {
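/// Decodes a packed byte: the low 5 bits offset from `'!'` to pick a printable
/// character, while the high 3 bits together with `bright` select the colour.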
pub fn | (appearance: u8, bright: bool) -> Self {
Sprite {
character: ('!' as u8 + (appearance & 0b00011111)) as char,
color: match (appearance >> 5, bright) {
(0b000, false) => DARK,
(0b001, false) => NAVY,
(0b010, false) => GREEN,
(0b011, false) => TEAL,
(0b100, false) => MAROON,
(0b101, false) => PURPLE,
(0b110, false) => BROWN,
(0b111, false) => GRAY,
(0b000, true) => DARK,
(0b001, true) => BLUE,
(0b010, true) => LIME,
(0b011, true) => AQUA,
(0b100, true) => RED,
(0b101, true) => PINK,
(0b110, true) => YELLOW,
_ => WHITE,
}
}
}
pub fn darken(self, shade: bool) -> Self {
if shade {
Sprite {
character: self.character,
color: DARK
}
} else {
self
}
}
}
| of_byte | identifier_name |
sprite.rs | #[derive(Copy, Clone, Eq, PartialEq)]
pub enum Color {
Navy, Green, Teal, Maroon,
Purple, Brown, Gray, Dark,
Blue, Lime, Aqua, Red,
Pink, Yellow, White,
}
use self::Color::*;
pub const NAVY: &'static [Color] = &[Navy];
pub const GREEN: &'static [Color] = &[Green];
pub const TEAL: &'static [Color] = &[Teal];
pub const MAROON: &'static [Color] = &[Maroon];
pub const PURPLE: &'static [Color] = &[Purple];
pub const BROWN: &'static [Color] = &[Brown];
pub const GRAY: &'static [Color] = &[Gray];
pub const DARK: &'static [Color] = &[Dark];
pub const BLUE: &'static [Color] = &[Blue];
pub const LIME: &'static [Color] = &[Lime];
pub const AQUA: &'static [Color] = &[Aqua];
pub const RED: &'static [Color] = &[Red];
pub const PINK: &'static [Color] = &[Pink];
pub const YELLOW: &'static [Color] = &[Yellow];
pub const WHITE: &'static [Color] = &[White];
pub const GLITCH: &'static [Color] = &[Yellow, Pink];
pub const GOLD: &'static [Color] = &[Red, Yellow, Brown, White];
pub const SAPPHIRE: &'static [Color] = &[Blue, Navy, Aqua, Teal];
#[derive(Copy, Clone)]
pub struct Sprite {
pub character: char,
pub color: &'static [Color],
} | pub const HIDDEN: Sprite = Sprite {
character: ' ',
color: DARK
};
impl Sprite {
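/// Decodes a packed byte: the low 5 bits offset from `'!'` to pick a printable
/// character, while the high 3 bits together with `bright` select the colour.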
pub fn of_byte(appearance: u8, bright: bool) -> Self {
Sprite {
character: ('!' as u8 + (appearance & 0b00011111)) as char,
color: match (appearance >> 5, bright) {
(0b000, false) => DARK,
(0b001, false) => NAVY,
(0b010, false) => GREEN,
(0b011, false) => TEAL,
(0b100, false) => MAROON,
(0b101, false) => PURPLE,
(0b110, false) => BROWN,
(0b111, false) => GRAY,
(0b000, true) => DARK,
(0b001, true) => BLUE,
(0b010, true) => LIME,
(0b011, true) => AQUA,
(0b100, true) => RED,
(0b101, true) => PINK,
(0b110, true) => YELLOW,
_ => WHITE,
}
}
}
pub fn darken(self, shade: bool) -> Self {
if shade {
Sprite {
character: self.character,
color: DARK
}
} else {
self
}
}
} | random_line_split |
|
gen.rs | use backend::obj::*;
use csv;
use num;
use rand;
use rand::Rng;
use serialize::base64::{self, FromBase64, ToBase64};
use std::cmp;
use std::f32::consts;
use std::fmt;
use std::slice::Iter;
pub type Dna = Box<[u8]>;
const MAX_POLY_SIDES: u8 = 8; // in conformity with box2d?
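// Bit-level DNA indexing helpers: `bit_count` converts a byte length to a bit
// length, `split_bit` turns an absolute bit index into (byte index, bit offset).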
fn bit_count(p: usize) -> usize { p << 3 }
fn split_bit(p: usize) -> (usize, u8) { (p >> 3, (p & 0x7) as u8) }
pub struct GenePool {
gene_pool: Box<[Dna]>,
round_robin: usize,
}
impl GenePool {
pub fn gene_pool_iter(&self) -> Iter<Dna> { self.gene_pool.iter() }
pub fn gene_pool_index(&self) -> usize { self.round_robin }
pub fn populate_from_base64(&mut self, base64: &[String], round_robin: usize) {
self.gene_pool =
base64.iter().map(|s| s.from_base64().unwrap().into_boxed_slice()).collect::<Vec<_>>().into_boxed_slice();
self.round_robin = round_robin;
}
pub fn parse_from_base64(base64: &[&str]) -> Self {
GenePool {
gene_pool: base64
.iter()
.map(|s| s.from_base64().unwrap().into_boxed_slice())
.collect::<Vec<_>>()
.into_boxed_slice(),
round_robin: 0,
}
}
pub fn parse_from_resource(data: &[u8]) -> Self {
let mut gene_pool = Vec::new();
let mut csv = csv::Reader::from_bytes(data).has_headers(false);
for row in csv.records() {
let fields = row.unwrap();
gene_pool.push(fields[0].from_base64().unwrap().into_boxed_slice());
}
GenePool { gene_pool: gene_pool.to_vec().into_boxed_slice(), round_robin: 0 }
}
pub fn len(&self) -> usize { self.gene_pool.len() }
#[allow(dead_code)]
pub fn new(gene_pool: &[Dna]) -> Self {
GenePool { gene_pool: gene_pool.to_vec().into_boxed_slice(), round_robin: 0 }
}
pub fn randomize(&mut self) {
let mut rnd = Randomizer::new();
self.gene_pool[self.round_robin] = rnd.seed().dna_cloned();
}
pub fn next(&mut self) -> Genome {
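// Returns the current genome unchanged, stores a mutated copy back into the
// pool, then advances the round-robin cursor.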
let gen = Genome::copy_from(&self.gene_pool[self.round_robin].clone());
let mutated = gen.mutate(&mut rand::thread_rng());
self.gene_pool[self.round_robin] = mutated.dna_cloned();
self.round_robin = (self.round_robin + 1) % self.gene_pool.len();
gen
}
}
#[allow(dead_code)]
pub trait Generator {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float;
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + num::ToPrimitive + num::FromPrimitive + Copy;
fn next_bool(&mut self) -> bool { self.next_integer::<u8>(0, 1) == 1 }
fn ball(&mut self) -> Shape {
let radius: f32 = self.next_float(0.5, 0.75);
Shape::new_ball(radius)
}
fn quad(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio: f32 = self.next_float(1.0, 2.0);
Shape::new_box(radius, ratio)
}
fn vbar(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio: f32 = self.next_float(0.1, 0.2);
Shape::new_box(radius, ratio)
}
fn triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = self.next_float(consts::PI * 0.5, consts::PI * 0.8);
let alpha2 = self.next_float(consts::PI * 1.2, consts::PI * 1.5);
Shape::new_triangle(radius, alpha1, alpha2)
}
fn iso_triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = self.next_float(consts::PI * 0.5, consts::PI * 0.8);
let alpha2 = consts::PI * 2. - alpha1;
Shape::new_triangle(radius, alpha1, alpha2)
}
fn eq_triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = consts::PI * 2. / 3.;
let alpha2 = consts::PI * 2. - alpha1;
Shape::new_triangle(radius, alpha1, alpha2)
}
fn star(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
// if pie slices are too small physics freaks out
let n = self.next_integer(3, if radius > 1.5 { MAX_POLY_SIDES } else { MAX_POLY_SIDES - 2 });
let ratio1 = self.next_float(0.5, 1.0);
let ratio2 = self.next_float(0.7, 0.9) * (1. / ratio1);
Shape::new_star(n, radius, ratio1, ratio2)
}
fn | (&mut self, upside_down: bool) -> Shape {
let n = self.next_integer(3, MAX_POLY_SIDES);
self.npoly(n, upside_down)
}
fn any_poly(&mut self) -> Shape {
let n = self.next_integer(3, MAX_POLY_SIDES);
let upside_down = self.next_bool();
self.npoly(n, upside_down)
}
fn npoly(&mut self, n: AttachmentIndex, upside_down: bool) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio1 = f32::cos(consts::PI / f32::from(n));
let corrected_radius = if upside_down { radius * ratio1 } else { radius };
if n <= MAX_POLY_SIDES {
Shape::new_poly(if upside_down { -1 } else { 1 } * n as i8, corrected_radius)
} else {
let ratio2 = 1. / ratio1;
if upside_down {
Shape::new_star(n, corrected_radius, ratio2, ratio1)
} else {
Shape::new_star(n, corrected_radius, ratio1, ratio2)
}
}
}
}
#[allow(dead_code)]
pub struct Randomizer<R>
where R: rand::Rng {
rng: R,
}
#[allow(dead_code)]
impl Randomizer<rand::ThreadRng> {
pub fn new() -> Randomizer<rand::ThreadRng> { Randomizer { rng: rand::thread_rng() } }
}
impl Generator for Randomizer<rand::ThreadRng> {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float {
self.rng.gen::<T>() * (max - min) + min
}
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + Copy {
self.rng.gen::<T>() % (max - min + T::one()) + min
}
}
trait Seeder {
fn seed(&mut self) -> Genome;
}
impl<R> Seeder for Randomizer<R>
where R: rand::Rng
{
fn seed(&mut self) -> Genome {
let mut dna = vec![0u8; 72];
self.rng.fill_bytes(dna.as_mut_slice());
Genome::new(dna)
}
}
#[derive(Clone)]
pub struct Genome {
dna: Box<[u8]>,
ptr: usize,
bit_count: usize,
}
impl Genome {
pub fn copy_from(dna: &[u8]) -> Self {
Genome { ptr: 0, bit_count: bit_count(dna.len()), dna: dna.to_owned().into_boxed_slice() }
}
pub fn new(dna: Vec<u8>) -> Self { Genome { ptr: 0, bit_count: bit_count(dna.len()), dna: dna.into_boxed_slice() } }
#[inline]
fn next_bit(&mut self) -> u8 {
let (byte, bit) = split_bit(self.ptr);
let next = (self.dna[byte] & (1 << bit)) >> bit;
self.ptr = (self.ptr + 1) % self.bit_count;
next
}
#[inline]
fn next_bits(&mut self, n: u8) -> i64 {
//use std::iter;
//iter::repeat_with(|| i64::from(self.next_bit())).take(usize::from(n)).fold(0,
// |a, bit| a << 1 | bit)
(0..n).fold(0, |a, _| a << 1 | i64::from(self.next_bit()))
}
#[inline]
fn count_bits(d: u64) -> u8 { (64 - d.leading_zeros()) as u8 }
fn next_i32(&mut self, min: i32, max: i32) -> i32 {
let diff = i64::from(max) - i64::from(min) + 1i64;
if diff <= 0 {
min
} else {
(self.next_bits(Self::count_bits(diff as u64)) % diff + i64::from(min)) as i32
}
}
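/// Single-point crossover: a random bit position splits the genome; bytes on one
/// side come from `self` and the rest from `other`, with `flip_mask` randomly
/// choosing which parent contributes which side.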
pub fn crossover<R: rand::Rng>(&self, rng: &mut R, other: &Dna) -> Self {
let len = cmp::min(self.bit_count, bit_count(other.len()));
let (byte, bit) = split_bit(rng.gen::<usize>() % len);
let flip_mask = if rng.gen::<bool>() { 0xffu8 } else { 0x0u8 };
let mut new_genes = self.dna.to_vec();
for i in 0..len / 8 {
let a = new_genes[i];
let b = other[i];
let mask = if i < byte || (bit == 0 && i == byte) {
0x00u8
} else if i > byte {
0xffu8
} else {
(0xffu8 >> (8 - bit)) as u8
} ^ flip_mask;
new_genes[i] = (mask & a) | (!mask & b);
}
debug!(
"crossover at {}/{}: {} * {} -> {}",
byte,
bit,
self.dna.to_base64(base64::STANDARD),
other.to_base64(base64::STANDARD),
new_genes.to_base64(base64::STANDARD)
);
Genome::new(new_genes)
}
pub fn mutate<R: rand::Rng>(&self, rng: &mut R) -> Self {
let mut new_genes = self.dna.to_vec();
let n_mutations = rng.gen::<usize>() % (new_genes.len() / 8 + 1);
for _ in 0..n_mutations {
let (byte, bit) = split_bit(rng.gen::<usize>() % self.bit_count);
new_genes[byte] ^= 1 << bit;
}
Genome::new(new_genes)
}
pub fn dna_cloned(&self) -> Box<[u8]> { self.dna.clone() }
}
impl fmt::Display for Genome {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.dna.to_base64(base64::STANDARD)) }
}
const BITS_FOR_FLOAT: u8 = 10;
impl Generator for Genome {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float {
let u0 = self.next_bits(BITS_FOR_FLOAT);
let n: T = T::from(u0).unwrap() / T::from(1 << BITS_FOR_FLOAT).unwrap();
n * (max - min) + min
}
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + num::ToPrimitive + num::FromPrimitive + Copy {
num::NumCast::from(min)
.and_then(|a| num::NumCast::from(max).map(|b| self.next_i32(a, b)))
.and_then(num::FromPrimitive::from_i32)
.unwrap_or(min)
}
}
#[cfg(test)]
mod tests {}
| poly | identifier_name |
gen.rs | use backend::obj::*;
use csv;
use num;
use rand;
use rand::Rng;
use serialize::base64::{self, FromBase64, ToBase64};
use std::cmp;
use std::f32::consts;
use std::fmt;
use std::slice::Iter;
pub type Dna = Box<[u8]>;
const MAX_POLY_SIDES: u8 = 8; // in conformity with box2d?
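// Bit-level DNA indexing helpers: `bit_count` converts a byte length to a bit
// length, `split_bit` turns an absolute bit index into (byte index, bit offset).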
fn bit_count(p: usize) -> usize { p << 3 }
fn split_bit(p: usize) -> (usize, u8) { (p >> 3, (p & 0x7) as u8) }
pub struct GenePool {
gene_pool: Box<[Dna]>,
round_robin: usize,
}
impl GenePool {
pub fn gene_pool_iter(&self) -> Iter<Dna> { self.gene_pool.iter() }
pub fn gene_pool_index(&self) -> usize { self.round_robin }
pub fn populate_from_base64(&mut self, base64: &[String], round_robin: usize) {
self.gene_pool =
base64.iter().map(|s| s.from_base64().unwrap().into_boxed_slice()).collect::<Vec<_>>().into_boxed_slice();
self.round_robin = round_robin;
}
pub fn parse_from_base64(base64: &[&str]) -> Self {
GenePool {
gene_pool: base64
.iter()
.map(|s| s.from_base64().unwrap().into_boxed_slice())
.collect::<Vec<_>>()
.into_boxed_slice(),
round_robin: 0,
}
}
pub fn parse_from_resource(data: &[u8]) -> Self {
let mut gene_pool = Vec::new();
let mut csv = csv::Reader::from_bytes(data).has_headers(false);
for row in csv.records() {
let fields = row.unwrap();
gene_pool.push(fields[0].from_base64().unwrap().into_boxed_slice());
}
GenePool { gene_pool: gene_pool.to_vec().into_boxed_slice(), round_robin: 0 }
}
pub fn len(&self) -> usize { self.gene_pool.len() }
#[allow(dead_code)]
pub fn new(gene_pool: &[Dna]) -> Self {
GenePool { gene_pool: gene_pool.to_vec().into_boxed_slice(), round_robin: 0 }
}
pub fn randomize(&mut self) {
let mut rnd = Randomizer::new();
self.gene_pool[self.round_robin] = rnd.seed().dna_cloned();
}
pub fn next(&mut self) -> Genome {
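// Returns the current genome unchanged, stores a mutated copy back into the
// pool, then advances the round-robin cursor.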
let gen = Genome::copy_from(&self.gene_pool[self.round_robin].clone());
let mutated = gen.mutate(&mut rand::thread_rng());
self.gene_pool[self.round_robin] = mutated.dna_cloned();
self.round_robin = (self.round_robin + 1) % self.gene_pool.len();
gen
}
}
#[allow(dead_code)]
pub trait Generator {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float;
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + num::ToPrimitive + num::FromPrimitive + Copy;
fn next_bool(&mut self) -> bool { self.next_integer::<u8>(0, 1) == 1 }
fn ball(&mut self) -> Shape {
let radius: f32 = self.next_float(0.5, 0.75);
Shape::new_ball(radius)
}
fn quad(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio: f32 = self.next_float(1.0, 2.0);
Shape::new_box(radius, ratio)
}
fn vbar(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio: f32 = self.next_float(0.1, 0.2);
Shape::new_box(radius, ratio)
}
fn triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = self.next_float(consts::PI * 0.5, consts::PI * 0.8);
let alpha2 = self.next_float(consts::PI * 1.2, consts::PI * 1.5);
Shape::new_triangle(radius, alpha1, alpha2)
}
fn iso_triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = self.next_float(consts::PI * 0.5, consts::PI * 0.8);
let alpha2 = consts::PI * 2. - alpha1;
Shape::new_triangle(radius, alpha1, alpha2)
}
fn eq_triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = consts::PI * 2. / 3.;
let alpha2 = consts::PI * 2. - alpha1;
Shape::new_triangle(radius, alpha1, alpha2)
}
fn star(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
// if pie slices are too small physics freaks out
let n = self.next_integer(3, if radius > 1.5 { MAX_POLY_SIDES } else { MAX_POLY_SIDES - 2 });
let ratio1 = self.next_float(0.5, 1.0);
let ratio2 = self.next_float(0.7, 0.9) * (1. / ratio1);
Shape::new_star(n, radius, ratio1, ratio2)
}
fn poly(&mut self, upside_down: bool) -> Shape {
let n = self.next_integer(3, MAX_POLY_SIDES);
self.npoly(n, upside_down)
}
fn any_poly(&mut self) -> Shape {
let n = self.next_integer(3, MAX_POLY_SIDES);
let upside_down = self.next_bool();
self.npoly(n, upside_down)
}
fn npoly(&mut self, n: AttachmentIndex, upside_down: bool) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio1 = f32::cos(consts::PI / f32::from(n));
let corrected_radius = if upside_down { radius * ratio1 } else { radius };
if n <= MAX_POLY_SIDES {
Shape::new_poly(if upside_down { -1 } else { 1 } * n as i8, corrected_radius)
} else {
let ratio2 = 1. / ratio1;
if upside_down | else {
Shape::new_star(n, corrected_radius, ratio1, ratio2)
}
}
}
}
#[allow(dead_code)]
pub struct Randomizer<R>
where R: rand::Rng {
rng: R,
}
#[allow(dead_code)]
impl Randomizer<rand::ThreadRng> {
pub fn new() -> Randomizer<rand::ThreadRng> { Randomizer { rng: rand::thread_rng() } }
}
impl Generator for Randomizer<rand::ThreadRng> {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float {
self.rng.gen::<T>() * (max - min) + min
}
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + Copy {
self.rng.gen::<T>() % (max - min + T::one()) + min
}
}
trait Seeder {
fn seed(&mut self) -> Genome;
}
impl<R> Seeder for Randomizer<R>
where R: rand::Rng
{
fn seed(&mut self) -> Genome {
let mut dna = vec![0u8; 72];
self.rng.fill_bytes(dna.as_mut_slice());
Genome::new(dna)
}
}
#[derive(Clone)]
pub struct Genome {
dna: Box<[u8]>,
ptr: usize,
bit_count: usize,
}
impl Genome {
pub fn copy_from(dna: &[u8]) -> Self {
Genome { ptr: 0, bit_count: bit_count(dna.len()), dna: dna.to_owned().into_boxed_slice() }
}
pub fn new(dna: Vec<u8>) -> Self { Genome { ptr: 0, bit_count: bit_count(dna.len()), dna: dna.into_boxed_slice() } }
#[inline]
fn next_bit(&mut self) -> u8 {
let (byte, bit) = split_bit(self.ptr);
let next = (self.dna[byte] & (1 << bit)) >> bit;
self.ptr = (self.ptr + 1) % self.bit_count;
next
}
#[inline]
fn next_bits(&mut self, n: u8) -> i64 {
//use std::iter;
//iter::repeat_with(|| i64::from(self.next_bit())).take(usize::from(n)).fold(0,
// |a, bit| a << 1 | bit)
(0..n).fold(0, |a, _| a << 1 | i64::from(self.next_bit()))
}
#[inline]
fn count_bits(d: u64) -> u8 { (64 - d.leading_zeros()) as u8 }
fn next_i32(&mut self, min: i32, max: i32) -> i32 {
let diff = i64::from(max) - i64::from(min) + 1i64;
if diff <= 0 {
min
} else {
(self.next_bits(Self::count_bits(diff as u64)) % diff + i64::from(min)) as i32
}
}
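/// Single-point crossover: a random bit position splits the genome; bytes on one
/// side come from `self` and the rest from `other`, with `flip_mask` randomly
/// choosing which parent contributes which side.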
pub fn crossover<R: rand::Rng>(&self, rng: &mut R, other: &Dna) -> Self {
let len = cmp::min(self.bit_count, bit_count(other.len()));
let (byte, bit) = split_bit(rng.gen::<usize>() % len);
let flip_mask = if rng.gen::<bool>() { 0xffu8 } else { 0x0u8 };
let mut new_genes = self.dna.to_vec();
for i in 0..len / 8 {
let a = new_genes[i];
let b = other[i];
let mask = if i < byte || (bit == 0 && i == byte) {
0x00u8
} else if i > byte {
0xffu8
} else {
(0xffu8 >> (8 - bit)) as u8
} ^ flip_mask;
new_genes[i] = (mask & a) | (!mask & b);
}
debug!(
"crossover at {}/{}: {} * {} -> {}",
byte,
bit,
self.dna.to_base64(base64::STANDARD),
other.to_base64(base64::STANDARD),
new_genes.to_base64(base64::STANDARD)
);
Genome::new(new_genes)
}
pub fn mutate<R: rand::Rng>(&self, rng: &mut R) -> Self {
let mut new_genes = self.dna.to_vec();
let n_mutations = rng.gen::<usize>() % (new_genes.len() / 8 + 1);
for _ in 0..n_mutations {
let (byte, bit) = split_bit(rng.gen::<usize>() % self.bit_count);
new_genes[byte] ^= 1 << bit;
}
Genome::new(new_genes)
}
pub fn dna_cloned(&self) -> Box<[u8]> { self.dna.clone() }
}
impl fmt::Display for Genome {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.dna.to_base64(base64::STANDARD)) }
}
const BITS_FOR_FLOAT: u8 = 10;
impl Generator for Genome {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float {
let u0 = self.next_bits(BITS_FOR_FLOAT);
let n: T = T::from(u0).unwrap() / T::from(1 << BITS_FOR_FLOAT).unwrap();
n * (max - min) + min
}
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + num::ToPrimitive + num::FromPrimitive + Copy {
num::NumCast::from(min)
.and_then(|a| num::NumCast::from(max).map(|b| self.next_i32(a, b)))
.and_then(num::FromPrimitive::from_i32)
.unwrap_or(min)
}
}
#[cfg(test)]
mod tests {}
| {
Shape::new_star(n, corrected_radius, ratio2, ratio1)
} | conditional_block |
gen.rs | use backend::obj::*;
use csv;
use num;
use rand;
use rand::Rng;
use serialize::base64::{self, FromBase64, ToBase64};
use std::cmp;
use std::f32::consts;
use std::fmt;
use std::slice::Iter;
pub type Dna = Box<[u8]>;
const MAX_POLY_SIDES: u8 = 8; // in conformity with box2d?
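// Bit-level DNA indexing helpers: `bit_count` converts a byte length to a bit
// length, `split_bit` turns an absolute bit index into (byte index, bit offset).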
fn bit_count(p: usize) -> usize { p << 3 }
fn split_bit(p: usize) -> (usize, u8) { (p >> 3, (p & 0x7) as u8) }
pub struct GenePool {
gene_pool: Box<[Dna]>,
round_robin: usize,
}
impl GenePool {
pub fn gene_pool_iter(&self) -> Iter<Dna> { self.gene_pool.iter() }
pub fn gene_pool_index(&self) -> usize { self.round_robin }
pub fn populate_from_base64(&mut self, base64: &[String], round_robin: usize) {
self.gene_pool =
base64.iter().map(|s| s.from_base64().unwrap().into_boxed_slice()).collect::<Vec<_>>().into_boxed_slice();
self.round_robin = round_robin;
}
pub fn parse_from_base64(base64: &[&str]) -> Self {
GenePool {
gene_pool: base64
.iter()
.map(|s| s.from_base64().unwrap().into_boxed_slice())
.collect::<Vec<_>>()
.into_boxed_slice(),
round_robin: 0,
}
}
pub fn parse_from_resource(data: &[u8]) -> Self {
let mut gene_pool = Vec::new();
let mut csv = csv::Reader::from_bytes(data).has_headers(false);
for row in csv.records() {
let fields = row.unwrap();
gene_pool.push(fields[0].from_base64().unwrap().into_boxed_slice());
}
GenePool { gene_pool: gene_pool.to_vec().into_boxed_slice(), round_robin: 0 }
}
pub fn len(&self) -> usize { self.gene_pool.len() }
#[allow(dead_code)]
pub fn new(gene_pool: &[Dna]) -> Self {
GenePool { gene_pool: gene_pool.to_vec().into_boxed_slice(), round_robin: 0 }
}
pub fn randomize(&mut self) {
let mut rnd = Randomizer::new();
self.gene_pool[self.round_robin] = rnd.seed().dna_cloned();
}
pub fn next(&mut self) -> Genome {
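// Returns the current genome unchanged, stores a mutated copy back into the
// pool, then advances the round-robin cursor.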
let gen = Genome::copy_from(&self.gene_pool[self.round_robin].clone());
let mutated = gen.mutate(&mut rand::thread_rng());
self.gene_pool[self.round_robin] = mutated.dna_cloned();
self.round_robin = (self.round_robin + 1) % self.gene_pool.len();
gen
}
}
#[allow(dead_code)]
pub trait Generator {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float;
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + num::ToPrimitive + num::FromPrimitive + Copy;
fn next_bool(&mut self) -> bool { self.next_integer::<u8>(0, 1) == 1 }
fn ball(&mut self) -> Shape {
let radius: f32 = self.next_float(0.5, 0.75);
Shape::new_ball(radius)
}
fn quad(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio: f32 = self.next_float(1.0, 2.0);
Shape::new_box(radius, ratio)
}
fn vbar(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio: f32 = self.next_float(0.1, 0.2);
Shape::new_box(radius, ratio)
}
fn triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = self.next_float(consts::PI * 0.5, consts::PI * 0.8);
let alpha2 = self.next_float(consts::PI * 1.2, consts::PI * 1.5);
Shape::new_triangle(radius, alpha1, alpha2)
}
fn iso_triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = self.next_float(consts::PI * 0.5, consts::PI * 0.8);
let alpha2 = consts::PI * 2. - alpha1;
Shape::new_triangle(radius, alpha1, alpha2)
}
fn eq_triangle(&mut self) -> Shape {
let radius = self.next_float(0.5, 1.0);
let alpha1 = consts::PI * 2. / 3.;
let alpha2 = consts::PI * 2. - alpha1;
Shape::new_triangle(radius, alpha1, alpha2)
}
fn star(&mut self) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0); | Shape::new_star(n, radius, ratio1, ratio2)
}
fn poly(&mut self, upside_down: bool) -> Shape {
let n = self.next_integer(3, MAX_POLY_SIDES);
self.npoly(n, upside_down)
}
fn any_poly(&mut self) -> Shape {
let n = self.next_integer(3, MAX_POLY_SIDES);
let upside_down = self.next_bool();
self.npoly(n, upside_down)
}
fn npoly(&mut self, n: AttachmentIndex, upside_down: bool) -> Shape {
let radius: f32 = self.next_float(1.0, 2.0);
let ratio1 = f32::cos(consts::PI / f32::from(n));
let corrected_radius = if upside_down { radius * ratio1 } else { radius };
if n <= MAX_POLY_SIDES {
Shape::new_poly(if upside_down { -1 } else { 1 } * n as i8, corrected_radius)
} else {
let ratio2 = 1. / ratio1;
if upside_down {
Shape::new_star(n, corrected_radius, ratio2, ratio1)
} else {
Shape::new_star(n, corrected_radius, ratio1, ratio2)
}
}
}
}
#[allow(dead_code)]
pub struct Randomizer<R>
where R: rand::Rng {
rng: R,
}
#[allow(dead_code)]
impl Randomizer<rand::ThreadRng> {
pub fn new() -> Randomizer<rand::ThreadRng> { Randomizer { rng: rand::thread_rng() } }
}
impl Generator for Randomizer<rand::ThreadRng> {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float {
self.rng.gen::<T>() * (max - min) + min
}
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + Copy {
self.rng.gen::<T>() % (max - min + T::one()) + min
}
}
trait Seeder {
fn seed(&mut self) -> Genome;
}
impl<R> Seeder for Randomizer<R>
where R: rand::Rng
{
fn seed(&mut self) -> Genome {
let mut dna = vec![0u8; 72];
self.rng.fill_bytes(dna.as_mut_slice());
Genome::new(dna)
}
}
#[derive(Clone)]
pub struct Genome {
dna: Box<[u8]>,
ptr: usize,
bit_count: usize,
}
impl Genome {
pub fn copy_from(dna: &[u8]) -> Self {
Genome { ptr: 0, bit_count: bit_count(dna.len()), dna: dna.to_owned().into_boxed_slice() }
}
pub fn new(dna: Vec<u8>) -> Self { Genome { ptr: 0, bit_count: bit_count(dna.len()), dna: dna.into_boxed_slice() } }
#[inline]
fn next_bit(&mut self) -> u8 {
let (byte, bit) = split_bit(self.ptr);
let next = (self.dna[byte] & (1 << bit)) >> bit;
self.ptr = (self.ptr + 1) % self.bit_count;
next
}
#[inline]
fn next_bits(&mut self, n: u8) -> i64 {
//use std::iter;
//iter::repeat_with(|| i64::from(self.next_bit())).take(usize::from(n)).fold(0,
// |a, bit| a << 1 | bit)
(0..n).fold(0, |a, _| a << 1 | i64::from(self.next_bit()))
}
#[inline]
fn count_bits(d: u64) -> u8 { (64 - d.leading_zeros()) as u8 }
fn next_i32(&mut self, min: i32, max: i32) -> i32 {
let diff = i64::from(max) - i64::from(min) + 1i64;
if diff <= 0 {
min
} else {
(self.next_bits(Self::count_bits(diff as u64)) % diff + i64::from(min)) as i32
}
}
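/// Single-point crossover: a random bit position splits the genome; bytes on one
/// side come from `self` and the rest from `other`, with `flip_mask` randomly
/// choosing which parent contributes which side.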
pub fn crossover<R: rand::Rng>(&self, rng: &mut R, other: &Dna) -> Self {
let len = cmp::min(self.bit_count, bit_count(other.len()));
let (byte, bit) = split_bit(rng.gen::<usize>() % len);
let flip_mask = if rng.gen::<bool>() { 0xffu8 } else { 0x0u8 };
let mut new_genes = self.dna.to_vec();
for i in 0..len / 8 {
let a = new_genes[i];
let b = other[i];
let mask = if i < byte || (bit == 0 && i == byte) {
0x00u8
} else if i > byte {
0xffu8
} else {
(0xffu8 >> (8 - bit)) as u8
} ^ flip_mask;
new_genes[i] = (mask & a) | (!mask & b);
}
debug!(
"crossover at {}/{}: {} * {} -> {}",
byte,
bit,
self.dna.to_base64(base64::STANDARD),
other.to_base64(base64::STANDARD),
new_genes.to_base64(base64::STANDARD)
);
Genome::new(new_genes)
}
pub fn mutate<R: rand::Rng>(&self, rng: &mut R) -> Self {
let mut new_genes = self.dna.to_vec();
let n_mutations = rng.gen::<usize>() % (new_genes.len() / 8 + 1);
for _ in 0..n_mutations {
let (byte, bit) = split_bit(rng.gen::<usize>() % self.bit_count);
new_genes[byte] ^= 1 << bit;
}
Genome::new(new_genes)
}
pub fn dna_cloned(&self) -> Box<[u8]> { self.dna.clone() }
}
impl fmt::Display for Genome {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.dna.to_base64(base64::STANDARD)) }
}
const BITS_FOR_FLOAT: u8 = 10;
impl Generator for Genome {
fn next_float<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Float {
let u0 = self.next_bits(BITS_FOR_FLOAT);
let n: T = T::from(u0).unwrap() / T::from(1 << BITS_FOR_FLOAT).unwrap();
n * (max - min) + min
}
fn next_integer<T>(&mut self, min: T, max: T) -> T
where T: rand::Rand + num::Integer + num::ToPrimitive + num::FromPrimitive + Copy {
num::NumCast::from(min)
.and_then(|a| num::NumCast::from(max).map(|b| self.next_i32(a, b)))
.and_then(num::FromPrimitive::from_i32)
.unwrap_or(min)
}
}
#[cfg(test)]
mod tests {} | // if pie slices are too small physics freaks out
let n = self.next_integer(3, if radius > 1.5 { MAX_POLY_SIDES } else { MAX_POLY_SIDES - 2 });
let ratio1 = self.next_float(0.5, 1.0);
let ratio2 = self.next_float(0.7, 0.9) * (1. / ratio1); | random_line_split |
type-path-err-node-types.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type arguments in unresolved entities (reporting errors before type checking)
// should have their types recorded.
trait Tr<T> {}
fn local_type() {
let _: Nonexistent<u8, Assoc = u16>; //~ ERROR cannot find type `Nonexistent` in this scope
}
fn ufcs_trait() |
fn ufcs_item() {
NonExistent::Assoc::<u8>; //~ ERROR undeclared type or module `NonExistent`
}
fn method() {
nonexistent.nonexistent::<u8>(); //~ ERROR cannot find value `nonexistent`
}
fn closure() {
let _ = |a, b: _| -> _ { 0 }; // OK
}
fn main() {}
| {
<u8 as Tr<u8>>::nonexistent(); //~ ERROR cannot find method or associated constant `nonexistent`
} | identifier_body |
type-path-err-node-types.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type arguments in unresolved entities (reporting errors before type checking)
// should have their types recorded.
trait Tr<T> {}
fn local_type() {
let _: Nonexistent<u8, Assoc = u16>; //~ ERROR cannot find type `Nonexistent` in this scope
}
fn ufcs_trait() {
<u8 as Tr<u8>>::nonexistent(); //~ ERROR cannot find method or associated constant `nonexistent`
}
fn ufcs_item() {
NonExistent::Assoc::<u8>; //~ ERROR undeclared type or module `NonExistent`
}
fn method() { | nonexistent.nonexistent::<u8>(); //~ ERROR cannot find value `nonexistent`
}
fn closure() {
let _ = |a, b: _| -> _ { 0 }; // OK
}
fn main() {} | random_line_split |
|
type-path-err-node-types.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type arguments in unresolved entities (reporting errors before type checking)
// should have their types recorded.
trait Tr<T> {}
fn local_type() {
let _: Nonexistent<u8, Assoc = u16>; //~ ERROR cannot find type `Nonexistent` in this scope
}
fn | () {
<u8 as Tr<u8>>::nonexistent(); //~ ERROR cannot find method or associated constant `nonexistent`
}
fn ufcs_item() {
NonExistent::Assoc::<u8>; //~ ERROR undeclared type or module `NonExistent`
}
fn method() {
nonexistent.nonexistent::<u8>(); //~ ERROR cannot find value `nonexistent`
}
fn closure() {
let _ = |a, b: _| -> _ { 0 }; // OK
}
fn main() {}
| ufcs_trait | identifier_name |
role.rs | use std::cmp::Ordering;
use std::fmt::{Display, Formatter, Result as FmtResult};
use model::*;
#[cfg(feature = "cache")]
use CACHE;
#[cfg(all(feature = "builder", feature = "cache", feature = "model"))]
use builder::EditRole;
#[cfg(feature = "cache")]
use internal::prelude::*;
#[cfg(all(feature = "cache", feature = "model"))]
use http;
/// Information about a role within a guild. A role represents a set of
/// permissions, and can be attached to one or multiple users. A role has
/// various miscellaneous configurations, such as being assigned a colour. Roles
/// are unique per guild and do not cross over to other guilds in any way, and
/// can have channel-specific permission overrides in addition to guild-level
/// permissions.
#[derive(Clone, Debug, Deserialize)]
pub struct Role {
/// The Id of the role. Can be used to calculate the role's creation date.
pub id: RoleId,
/// The colour of the role. This is an ergonomic representation of the inner
/// value.
#[cfg(feature = "utils")]
#[serde(rename = "color")]
pub colour: Colour,
/// The colour of the role.
#[cfg(not(feature = "utils"))]
#[serde(rename = "color")]
pub colour: u32,
/// Indicator of whether the role is pinned above lesser roles.
///
/// In the client, this causes [`Member`]s in the role to be seen above
/// those in roles with a lower [`position`].
///
/// [`Member`]: struct.Member.html
/// [`position`]: #structfield.position
pub hoist: bool,
/// Indicator of whether the role is managed by an integration service.
pub managed: bool,
/// Indicator of whether the role can be mentioned, similar to mentioning a
/// specific member or `@everyone`.
///
/// Only members of the role will be notified if a role is mentioned with
/// this set to `true`.
#[serde(default)]
pub mentionable: bool,
/// The name of the role.
pub name: String,
/// A set of permissions that the role has been assigned.
///
/// See the [`permissions`] module for more information.
///
/// [`permissions`]: permissions/index.html
pub permissions: Permissions,
/// The role's position in the position list. Roles are considered higher in
/// hierarchy if their position is higher.
///
/// The `@everyone` role is usually either `-1` or `0`.
pub position: i64,
}
#[cfg(feature = "model")]
impl Role {
/// Deletes the role.
///
/// **Note** Requires the [Manage Roles] permission.
///
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(feature = "cache")]
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_role(self.find_guild()?.0, self.id.0) }
/// Edits a [`Role`], optionally setting its new fields.
///
/// Requires the [Manage Roles] permission.
///
/// # Examples
///
/// Make a role hoisted:
///
/// ```rust,no_run
/// # use serenity::model::RoleId;
/// # let role = RoleId(7).find().unwrap();
/// // assuming a `role` has already been bound
///
/// role.edit(|r| r.hoist(true));
/// ```
///
/// [`Role`]: struct.Role.html
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(all(feature = "builder", feature = "cache"))]
pub fn edit<F: FnOnce(EditRole) -> EditRole>(&self, f: F) -> Result<Role> {
self.find_guild().and_then(
|guild_id| guild_id.edit_role(self.id, f),
)
}
/// Searches the cache for the guild that owns the role.
///
/// # Errors
///
/// Returns a [`ModelError::GuildNotFound`] if a guild is not in the cache
/// that contains the role.
///
/// [`ModelError::GuildNotFound`]: enum.ModelError.html#variant.GuildNotFound
#[cfg(feature = "cache")]
pub fn find_guild(&self) -> Result<GuildId> {
for guild in CACHE.read().unwrap().guilds.values() {
let guild = guild.read().unwrap();
if guild.roles.contains_key(&RoleId(self.id.0)) {
return Ok(guild.id);
}
}
Err(Error::Model(ModelError::GuildNotFound))
}
/// Check that the role has the given permission. | /// Checks whether the role has all of the given permissions.
///
/// The 'precise' argument is used to check if the role's permissions are
/// precisely equivalent to the given permissions. If you need only check
/// that the role has at least the given permissions, pass `false`.
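/// For instance, a role holding exactly two permissions `A | B` passes
/// `has_permissions(A, false)` but fails `has_permissions(A, true)`, since the
/// precise check requires an exact match.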
pub fn has_permissions(&self, permissions: Permissions, precise: bool) -> bool {
if precise {
self.permissions == permissions
} else {
self.permissions.contains(permissions)
}
}
}
impl Display for Role {
/// Format a mention for the role, pinging its members.
// This is in the format of: `<@&ROLE_ID>`.
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.mention(), f) }
}
impl Eq for Role {}
impl Ord for Role {
fn cmp(&self, other: &Role) -> Ordering {
if self.position == other.position {
self.id.cmp(&other.id)
} else {
self.position.cmp(&other.position)
}
}
}
impl PartialEq for Role {
fn eq(&self, other: &Role) -> bool { self.id == other.id }
}
impl PartialOrd for Role {
fn partial_cmp(&self, other: &Role) -> Option<Ordering> { Some(self.cmp(other)) }
}
#[cfg(feature = "model")]
impl RoleId {
/// Search the cache for the role.
#[cfg(feature = "cache")]
pub fn find(&self) -> Option<Role> {
let cache = CACHE.read().unwrap();
for guild in cache.guilds.values() {
let guild = guild.read().unwrap();
if !guild.roles.contains_key(self) {
continue;
}
if let Some(role) = guild.roles.get(self) {
return Some(role.clone());
}
}
None
}
}
impl Display for RoleId {
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) }
}
impl From<Role> for RoleId {
/// Gets the Id of a role.
fn from(role: Role) -> RoleId { role.id }
}
impl<'a> From<&'a Role> for RoleId {
/// Gets the Id of a role.
fn from(role: &Role) -> RoleId { role.id }
} | #[inline]
pub fn has_permission(&self, permission: Permissions) -> bool {
self.permissions.contains(permission)
}
| random_line_split |
role.rs | use std::cmp::Ordering;
use std::fmt::{Display, Formatter, Result as FmtResult};
use model::*;
#[cfg(feature = "cache")]
use CACHE;
#[cfg(all(feature = "builder", feature = "cache", feature = "model"))]
use builder::EditRole;
#[cfg(feature = "cache")]
use internal::prelude::*;
#[cfg(all(feature = "cache", feature = "model"))]
use http;
/// Information about a role within a guild. A role represents a set of
/// permissions, and can be attached to one or multiple users. A role has
/// various miscellaneous configurations, such as being assigned a colour. Roles
/// are unique per guild and do not cross over to other guilds in any way, and
/// can have channel-specific permission overrides in addition to guild-level
/// permissions.
#[derive(Clone, Debug, Deserialize)]
pub struct Role {
/// The Id of the role. Can be used to calculate the role's creation date.
pub id: RoleId,
/// The colour of the role. This is an ergonomic representation of the inner
/// value.
#[cfg(feature = "utils")]
#[serde(rename = "color")]
pub colour: Colour,
/// The colour of the role.
#[cfg(not(feature = "utils"))]
#[serde(rename = "color")]
pub colour: u32,
/// Indicator of whether the role is pinned above lesser roles.
///
/// In the client, this causes [`Member`]s in the role to be seen above
/// those in roles with a lower [`position`].
///
/// [`Member`]: struct.Member.html
/// [`position`]: #structfield.position
pub hoist: bool,
/// Indicator of whether the role is managed by an integration service.
pub managed: bool,
/// Indicator of whether the role can be mentioned, similar to mentioning a
/// specific member or `@everyone`.
///
/// Only members of the role will be notified if a role is mentioned with
/// this set to `true`.
#[serde(default)]
pub mentionable: bool,
/// The name of the role.
pub name: String,
/// A set of permissions that the role has been assigned.
///
/// See the [`permissions`] module for more information.
///
/// [`permissions`]: permissions/index.html
pub permissions: Permissions,
/// The role's position in the position list. Roles are considered higher in
/// hierarchy if their position is higher.
///
/// The `@everyone` role is usually either `-1` or `0`.
pub position: i64,
}
#[cfg(feature = "model")]
impl Role {
/// Deletes the role.
///
/// **Note** Requires the [Manage Roles] permission.
///
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(feature = "cache")]
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_role(self.find_guild()?.0, self.id.0) }
/// Edits a [`Role`], optionally setting its new fields.
///
/// Requires the [Manage Roles] permission.
///
/// # Examples
///
/// Make a role hoisted:
///
/// ```rust,no_run
/// # use serenity::model::RoleId;
/// # let role = RoleId(7).find().unwrap();
/// // assuming a `role` has already been bound
///
/// role.edit(|r| r.hoist(true));
/// ```
///
/// [`Role`]: struct.Role.html
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(all(feature = "builder", feature = "cache"))]
pub fn edit<F: FnOnce(EditRole) -> EditRole>(&self, f: F) -> Result<Role> {
self.find_guild().and_then(
|guild_id| guild_id.edit_role(self.id, f),
)
}
/// Searches the cache for the guild that owns the role.
///
/// # Errors
///
/// Returns a [`ModelError::GuildNotFound`] if a guild is not in the cache
/// that contains the role.
///
/// [`ModelError::GuildNotFound`]: enum.ModelError.html#variant.GuildNotFound
#[cfg(feature = "cache")]
pub fn find_guild(&self) -> Result<GuildId> {
for guild in CACHE.read().unwrap().guilds.values() {
let guild = guild.read().unwrap();
if guild.roles.contains_key(&RoleId(self.id.0)) {
return Ok(guild.id);
}
}
Err(Error::Model(ModelError::GuildNotFound))
}
/// Check that the role has the given permission.
#[inline]
pub fn has_permission(&self, permission: Permissions) -> bool {
self.permissions.contains(permission)
}
/// Checks whether the role has all of the given permissions.
///
/// The 'precise' argument is used to check if the role's permissions are
/// precisely equivalent to the given permissions. If you need only check
/// that the role has at least the given permissions, pass `false`.
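/// For instance, a role holding exactly two permissions `A | B` passes
/// `has_permissions(A, false)` but fails `has_permissions(A, true)`, since the
/// precise check requires an exact match.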
pub fn has_permissions(&self, permissions: Permissions, precise: bool) -> bool {
if precise {
self.permissions == permissions
} else {
self.permissions.contains(permissions)
}
}
}
impl Display for Role {
/// Format a mention for the role, pinging its members.
// This is in the format of: `<@&ROLE_ID>`.
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.mention(), f) }
}
impl Eq for Role {}
impl Ord for Role {
fn cmp(&self, other: &Role) -> Ordering {
if self.position == other.position {
self.id.cmp(&other.id)
} else {
self.position.cmp(&other.position)
}
}
}
impl PartialEq for Role {
fn eq(&self, other: &Role) -> bool { self.id == other.id }
}
impl PartialOrd for Role {
fn partial_cmp(&self, other: &Role) -> Option<Ordering> { Some(self.cmp(other)) }
}
#[cfg(feature = "model")]
impl RoleId {
/// Search the cache for the role.
#[cfg(feature = "cache")]
pub fn find(&self) -> Option<Role> {
let cache = CACHE.read().unwrap();
for guild in cache.guilds.values() {
let guild = guild.read().unwrap();
if !guild.roles.contains_key(self) {
continue;
}
if let Some(role) = guild.roles.get(self) {
return Some(role.clone());
}
}
None
}
}
impl Display for RoleId {
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) }
}
impl From<Role> for RoleId {
/// Gets the Id of a role.
fn from(role: Role) -> RoleId |
}
impl<'a> From<&'a Role> for RoleId {
/// Gets the Id of a role.
fn from(role: &Role) -> RoleId { role.id }
}
| { role.id } | identifier_body |
role.rs | use std::cmp::Ordering;
use std::fmt::{Display, Formatter, Result as FmtResult};
use model::*;
#[cfg(feature = "cache")]
use CACHE;
#[cfg(all(feature = "builder", feature = "cache", feature = "model"))]
use builder::EditRole;
#[cfg(feature = "cache")]
use internal::prelude::*;
#[cfg(all(feature = "cache", feature = "model"))]
use http;
/// Information about a role within a guild. A role represents a set of
/// permissions, and can be attached to one or multiple users. A role has
/// various miscellaneous configurations, such as being assigned a colour. Roles
/// are unique per guild and do not cross over to other guilds in any way, and
/// can have channel-specific permission overrides in addition to guild-level
/// permissions.
#[derive(Clone, Debug, Deserialize)]
pub struct Role {
/// The Id of the role. Can be used to calculate the role's creation date.
pub id: RoleId,
/// The colour of the role. This is an ergonomic representation of the inner
/// value.
#[cfg(feature = "utils")]
#[serde(rename = "color")]
pub colour: Colour,
/// The colour of the role.
#[cfg(not(feature = "utils"))]
#[serde(rename = "color")]
pub colour: u32,
/// Indicator of whether the role is pinned above lesser roles.
///
/// In the client, this causes [`Member`]s in the role to be seen above
/// those in roles with a lower [`position`].
///
/// [`Member`]: struct.Member.html
/// [`position`]: #structfield.position
pub hoist: bool,
/// Indicator of whether the role is managed by an integration service.
pub managed: bool,
/// Indicator of whether the role can be mentioned, similar to mentioning a
/// specific member or `@everyone`.
///
/// Only members of the role will be notified if a role is mentioned with
/// this set to `true`.
#[serde(default)]
pub mentionable: bool,
/// The name of the role.
pub name: String,
/// A set of permissions that the role has been assigned.
///
/// See the [`permissions`] module for more information.
///
/// [`permissions`]: permissions/index.html
pub permissions: Permissions,
/// The role's position in the position list. Roles are considered higher in
/// hierarchy if their position is higher.
///
/// The `@everyone` role is usually either `-1` or `0`.
pub position: i64,
}
#[cfg(feature = "model")]
impl Role {
/// Deletes the role.
///
/// **Note** Requires the [Manage Roles] permission.
///
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(feature = "cache")]
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_role(self.find_guild()?.0, self.id.0) }
/// Edits a [`Role`], optionally setting its new fields.
///
/// Requires the [Manage Roles] permission.
///
/// # Examples
///
/// Make a role hoisted:
///
/// ```rust,no_run
/// # use serenity::model::RoleId;
/// # let role = RoleId(7).find().unwrap();
/// // assuming a `role` has already been bound
///
/// role.edit(|r| r.hoist(true));
/// ```
///
/// [`Role`]: struct.Role.html
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(all(feature = "builder", feature = "cache"))]
pub fn edit<F: FnOnce(EditRole) -> EditRole>(&self, f: F) -> Result<Role> {
self.find_guild().and_then(
|guild_id| guild_id.edit_role(self.id, f),
)
}
/// Searches the cache for the guild that owns the role.
///
/// # Errors
///
/// Returns a [`ModelError::GuildNotFound`] if a guild is not in the cache
/// that contains the role.
///
/// [`ModelError::GuildNotFound`]: enum.ModelError.html#variant.GuildNotFound
#[cfg(feature = "cache")]
pub fn find_guild(&self) -> Result<GuildId> {
for guild in CACHE.read().unwrap().guilds.values() {
let guild = guild.read().unwrap();
if guild.roles.contains_key(&RoleId(self.id.0)) {
return Ok(guild.id);
}
}
Err(Error::Model(ModelError::GuildNotFound))
}
/// Check that the role has the given permission.
#[inline]
pub fn has_permission(&self, permission: Permissions) -> bool {
self.permissions.contains(permission)
}
/// Checks whether the role has all of the given permissions.
///
/// The 'precise' argument is used to check if the role's permissions are
/// precisely equivalent to the given permissions. If you need only check
/// that the role has at least the given permissions, pass `false`.
pub fn has_permissions(&self, permissions: Permissions, precise: bool) -> bool {
if precise {
self.permissions == permissions
} else {
self.permissions.contains(permissions)
}
}
}
impl Display for Role {
/// Format a mention for the role, pinging its members.
// This is in the format of: `<@&ROLE_ID>`.
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.mention(), f) }
}
impl Eq for Role {}
impl Ord for Role {
fn cmp(&self, other: &Role) -> Ordering {
if self.position == other.position {
self.id.cmp(&other.id)
} else {
self.position.cmp(&other.position)
}
}
}
impl PartialEq for Role {
fn eq(&self, other: &Role) -> bool { self.id == other.id }
}
impl PartialOrd for Role {
fn | (&self, other: &Role) -> Option<Ordering> { Some(self.cmp(other)) }
}
#[cfg(feature = "model")]
impl RoleId {
/// Search the cache for the role.
#[cfg(feature = "cache")]
pub fn find(&self) -> Option<Role> {
let cache = CACHE.read().unwrap();
for guild in cache.guilds.values() {
let guild = guild.read().unwrap();
if !guild.roles.contains_key(self) {
continue;
}
if let Some(role) = guild.roles.get(self) {
return Some(role.clone());
}
}
None
}
}
impl Display for RoleId {
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) }
}
impl From<Role> for RoleId {
/// Gets the Id of a role.
fn from(role: Role) -> RoleId { role.id }
}
impl<'a> From<&'a Role> for RoleId {
/// Gets the Id of a role.
fn from(role: &Role) -> RoleId { role.id }
}
| partial_cmp | identifier_name |
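// Illustrative sketch, not part of the original role.rs above: the `Display`
// impls shown render a role as its mention string, so for a role whose id is
// 7, `format!("{}", role)` is expected to yield "<@&7>", while the bare
// `RoleId` formats as just "7". The helper name here is hypothetical.
fn example_role_mention(role: &Role) -> (String, String) {
    (format!("{}", role), format!("{}", role.id))
}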
role.rs | use std::cmp::Ordering;
use std::fmt::{Display, Formatter, Result as FmtResult};
use model::*;
#[cfg(feature = "cache")]
use CACHE;
#[cfg(all(feature = "builder", feature = "cache", feature = "model"))]
use builder::EditRole;
#[cfg(feature = "cache")]
use internal::prelude::*;
#[cfg(all(feature = "cache", feature = "model"))]
use http;
/// Information about a role within a guild. A role represents a set of
/// permissions, and can be attached to one or multiple users. A role has
/// various miscellaneous configurations, such as being assigned a colour. Roles
/// are unique per guild and do not cross over to other guilds in any way, and
/// can have channel-specific permission overrides in addition to guild-level
/// permissions.
#[derive(Clone, Debug, Deserialize)]
pub struct Role {
/// The Id of the role. Can be used to calculate the role's creation date.
pub id: RoleId,
/// The colour of the role. This is an ergonomic representation of the inner
/// value.
#[cfg(feature = "utils")]
#[serde(rename = "color")]
pub colour: Colour,
/// The colour of the role.
#[cfg(not(feature = "utils"))]
#[serde(rename = "color")]
pub colour: u32,
/// Indicator of whether the role is pinned above lesser roles.
///
/// In the client, this causes [`Member`]s in the role to be seen above
/// those in roles with a lower [`position`].
///
/// [`Member`]: struct.Member.html
/// [`position`]: #structfield.position
pub hoist: bool,
/// Indicator of whether the role is managed by an integration service.
pub managed: bool,
/// Indicator of whether the role can be mentioned, similar to mentioning a
/// specific member or `@everyone`.
///
/// Only members of the role will be notified if a role is mentioned with
/// this set to `true`.
#[serde(default)]
pub mentionable: bool,
/// The name of the role.
pub name: String,
/// A set of permissions that the role has been assigned.
///
/// See the [`permissions`] module for more information.
///
/// [`permissions`]: permissions/index.html
pub permissions: Permissions,
/// The role's position in the position list. Roles are considered higher in
/// hierarchy if their position is higher.
///
/// The `@everyone` role is usually either `-1` or `0`.
pub position: i64,
}
#[cfg(feature = "model")]
impl Role {
/// Deletes the role.
///
/// **Note** Requires the [Manage Roles] permission.
///
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(feature = "cache")]
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_role(self.find_guild()?.0, self.id.0) }
/// Edits a [`Role`], optionally setting its new fields.
///
/// Requires the [Manage Roles] permission.
///
/// # Examples
///
/// Make a role hoisted:
///
/// ```rust,no_run
/// # use serenity::model::RoleId;
/// # let role = RoleId(7).find().unwrap();
/// // assuming a `role` has already been bound
///
/// role.edit(|r| r.hoist(true));
/// ```
///
/// [`Role`]: struct.Role.html
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[cfg(all(feature = "builder", feature = "cache"))]
pub fn edit<F: FnOnce(EditRole) -> EditRole>(&self, f: F) -> Result<Role> {
self.find_guild().and_then(
|guild_id| guild_id.edit_role(self.id, f),
)
}
/// Searches the cache for the guild that owns the role.
///
/// # Errors
///
/// Returns a [`ModelError::GuildNotFound`] if a guild is not in the cache
/// that contains the role.
///
/// [`ModelError::GuildNotFound`]: enum.ModelError.html#variant.GuildNotFound
#[cfg(feature = "cache")]
pub fn find_guild(&self) -> Result<GuildId> {
for guild in CACHE.read().unwrap().guilds.values() {
let guild = guild.read().unwrap();
if guild.roles.contains_key(&RoleId(self.id.0)) {
return Ok(guild.id);
}
}
Err(Error::Model(ModelError::GuildNotFound))
}
/// Check that the role has the given permission.
#[inline]
pub fn has_permission(&self, permission: Permissions) -> bool {
self.permissions.contains(permission)
}
/// Checks whether the role has all of the given permissions.
///
/// The 'precise' argument is used to check if the role's permissions are
/// precisely equivalent to the given permissions. If you need only check
/// that the role has at least the given permissions, pass `false`.
pub fn has_permissions(&self, permissions: Permissions, precise: bool) -> bool {
if precise {
self.permissions == permissions
} else {
self.permissions.contains(permissions)
}
}
}
impl Display for Role {
/// Format a mention for the role, pinging its members.
// This is in the format of: `<@&ROLE_ID>`.
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.mention(), f) }
}
impl Eq for Role {}
impl Ord for Role {
fn cmp(&self, other: &Role) -> Ordering {
if self.position == other.position {
self.id.cmp(&other.id)
} else {
self.position.cmp(&other.position)
}
}
}
impl PartialEq for Role {
fn eq(&self, other: &Role) -> bool { self.id == other.id }
}
impl PartialOrd for Role {
fn partial_cmp(&self, other: &Role) -> Option<Ordering> { Some(self.cmp(other)) }
}
#[cfg(feature = "model")]
impl RoleId {
/// Search the cache for the role.
#[cfg(feature = "cache")]
pub fn find(&self) -> Option<Role> {
let cache = CACHE.read().unwrap();
for guild in cache.guilds.values() {
let guild = guild.read().unwrap();
if !guild.roles.contains_key(self) |
if let Some(role) = guild.roles.get(self) {
return Some(role.clone());
}
}
None
}
}
impl Display for RoleId {
fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) }
}
impl From<Role> for RoleId {
/// Gets the Id of a role.
fn from(role: Role) -> RoleId { role.id }
}
impl<'a> From<&'a Role> for RoleId {
/// Gets the Id of a role.
fn from(role: &Role) -> RoleId { role.id }
}
| {
continue;
} | conditional_block |
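// Illustrative sketch, not part of the original role.rs above: how the
// `precise` flag of `Role::has_permissions` differs from plain containment.
// Assumes the serenity-style `Role` and `Permissions` types defined above;
// the function name and `required` parameter are hypothetical.
fn example_role_permission_check(role: &Role, required: Permissions) -> (bool, bool) {
    // precise == true: the role's permission set must equal `required` exactly.
    let exact = role.has_permissions(required, true);
    // precise == false: the role only has to contain `required`; extras are fine.
    let at_least = role.has_permissions(required, false);
    (exact, at_least)
}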
vaesenc.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn | () {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM2)), operand3: Some(Direct(XMM6)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 105, 220, 198], OperandSize::Dword)
}
fn vaesenc_2() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM7)), operand3: Some(Indirect(EDI, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 65, 220, 7], OperandSize::Dword)
}
fn vaesenc_3() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM6)), operand3: Some(Direct(XMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 73, 220, 200], OperandSize::Qword)
}
fn vaesenc_4() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM4)), operand3: Some(IndirectScaledIndexedDisplaced(RDX, RAX, Two, 1425141855, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 89, 220, 188, 66, 95, 240, 241, 84], OperandSize::Qword)
}
| vaesenc_1 | identifier_name |
vaesenc.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn vaesenc_1() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM2)), operand3: Some(Direct(XMM6)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 105, 220, 198], OperandSize::Dword)
}
| run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM7)), operand3: Some(Indirect(EDI, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 65, 220, 7], OperandSize::Dword)
}
fn vaesenc_3() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM6)), operand3: Some(Direct(XMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 73, 220, 200], OperandSize::Qword)
}
fn vaesenc_4() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM4)), operand3: Some(IndirectScaledIndexedDisplaced(RDX, RAX, Two, 1425141855, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 89, 220, 188, 66, 95, 240, 241, 84], OperandSize::Qword)
} | fn vaesenc_2() { | random_line_split |
vaesenc.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn vaesenc_1() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM2)), operand3: Some(Direct(XMM6)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 105, 220, 198], OperandSize::Dword)
}
fn vaesenc_2() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM7)), operand3: Some(Indirect(EDI, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 65, 220, 7], OperandSize::Dword)
}
fn vaesenc_3() {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM6)), operand3: Some(Direct(XMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 73, 220, 200], OperandSize::Qword)
}
fn vaesenc_4() | {
run_test(&Instruction { mnemonic: Mnemonic::VAESENC, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM4)), operand3: Some(IndirectScaledIndexedDisplaced(RDX, RAX, Two, 1425141855, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 226, 89, 220, 188, 66, 95, 240, 241, 84], OperandSize::Qword)
} | identifier_body |
|
pbrt.rs | use std::str::{self, FromStr};
use nom::{digit, space};
use linalg::vector::Vector3;
use linalg::transform::Transform;
named!(unsigned_float<f64>, map_res!(
map_res!(
recognize!(
alt_complete!(
delimited!(digit, tag!("."), opt!(complete!(digit))) |
delimited!(opt!(digit), tag!("."), digit) |
digit
)
),
str::from_utf8
),
FromStr::from_str
));
named!(signed_float<f64>, map!(
pair!(
opt!(alt!(tag!("+") | tag!("-"))),
unsigned_float
),
|(sign, value): (Option<&[u8]>, f64)| {
sign.and_then(
|s| if s[0] == ('-' as u8) { Some(-1f64) } else { None }
).unwrap_or(1f64) * value
}
));
named!(vector3<Vector3>, do_parse!(
x: signed_float >>
space >>
y: signed_float >>
space >>
z: signed_float >>
(Vector3::new(x, y, z))
));
named!(look_at<Transform>, do_parse!(
tag!("LookAt") >>
space >>
eye: vector3 >>
space >>
look: vector3 >>
space >>
up: vector3 >>
(Transform::look_at(eye, look, up))
));
named!(transformation<Transform>, alt!(
look_at
));
#[cfg(test)]
mod tests {
use nom::IResult::Done;
use super::*;
#[test]
fn test_unsigned_float() {
assert_eq!(unsigned_float(&b"134"[..]), Done(&b""[..], 134.0));
assert_eq!(unsigned_float(&b"1234."[..]), Done(&b""[..], 1234.0));
assert_eq!(unsigned_float(&b"0.145"[..]), Done(&b""[..], 0.145));
assert_eq!(unsigned_float(&b"2.45"[..]), Done(&b""[..], 2.45));
}
#[test]
fn | () {
assert_eq!(signed_float(&b"134"[..]), Done(&b""[..], 134.0));
assert_eq!(signed_float(&b"-1234."[..]), Done(&b""[..], -1234.0));
assert_eq!(signed_float(&b"0.145"[..]), Done(&b""[..], 0.145));
assert_eq!(signed_float(&b"-2.45"[..]), Done(&b""[..], -2.45));
}
#[test]
fn test_vector3() {
assert_eq!(vector3(&b"0.0 10.0 100.0"[..]),
Done(&b""[..], Vector3::new(0.0, 10.0, 100.0)));
assert_eq!(vector3(&b"0 10 100"[..]),
Done(&b""[..], Vector3::new(0.0, 10.0, 100.0)));
assert_eq!(vector3(&b"0 -10 100"[..]),
Done(&b""[..], Vector3::new(0.0, -10.0, 100.0)));
}
#[test]
#[ignore]
fn test_look_at() {
assert_eq!(
look_at(&b"LookAt 0 10 100 0 -1 0 0 1 0"[..]),
Done(
&b""[..],
Transform::look_at(
Vector3::new(0.0, 10.0, 100.0),
Vector3::new(0.0, -1.0, 0.0),
Vector3::new(0.0, 1.0, 0.0)
)
)
);
}
}
| test_signed_float | identifier_name |
pbrt.rs | use std::str::{self, FromStr};
use nom::{digit, space};
use linalg::vector::Vector3;
use linalg::transform::Transform;
named!(unsigned_float<f64>, map_res!(
map_res!(
recognize!(
alt_complete!(
delimited!(digit, tag!("."), opt!(complete!(digit))) |
delimited!(opt!(digit), tag!("."), digit) |
digit
)
),
str::from_utf8
),
FromStr::from_str
));
named!(signed_float<f64>, map!(
pair!(
opt!(alt!(tag!("+") | tag!("-"))),
unsigned_float
),
|(sign, value): (Option<&[u8]>, f64)| {
sign.and_then(
|s| if s[0] == ('-' as u8) { Some(-1f64) } else { None }
).unwrap_or(1f64) * value
}
));
named!(vector3<Vector3>, do_parse!(
x: signed_float >> | z: signed_float >>
(Vector3::new(x, y, z))
));
named!(look_at<Transform>, do_parse!(
tag!("LookAt") >>
space >>
eye: vector3 >>
space >>
look: vector3 >>
space >>
up: vector3 >>
(Transform::look_at(eye, look, up))
));
named!(transformation<Transform>, alt!(
look_at
));
#[cfg(test)]
mod tests {
use nom::IResult::Done;
use super::*;
#[test]
fn test_unsigned_float() {
assert_eq!(unsigned_float(&b"134"[..]), Done(&b""[..], 134.0));
assert_eq!(unsigned_float(&b"1234."[..]), Done(&b""[..], 1234.0));
assert_eq!(unsigned_float(&b"0.145"[..]), Done(&b""[..], 0.145));
assert_eq!(unsigned_float(&b"2.45"[..]), Done(&b""[..], 2.45));
}
#[test]
fn test_signed_float() {
assert_eq!(signed_float(&b"134"[..]), Done(&b""[..], 134.0));
assert_eq!(signed_float(&b"-1234."[..]), Done(&b""[..], -1234.0));
assert_eq!(signed_float(&b"0.145"[..]), Done(&b""[..], 0.145));
assert_eq!(signed_float(&b"-2.45"[..]), Done(&b""[..], -2.45));
}
#[test]
fn test_vector3() {
assert_eq!(vector3(&b"0.0 10.0 100.0"[..]),
Done(&b""[..], Vector3::new(0.0, 10.0, 100.0)));
assert_eq!(vector3(&b"0 10 100"[..]),
Done(&b""[..], Vector3::new(0.0, 10.0, 100.0)));
assert_eq!(vector3(&b"0 -10 100"[..]),
Done(&b""[..], Vector3::new(0.0, -10.0, 100.0)));
}
#[test]
#[ignore]
fn test_look_at() {
assert_eq!(
look_at(&b"LookAt 0 10 100 0 -1 0 0 1 0"[..]),
Done(
&b""[..],
Transform::look_at(
Vector3::new(0.0, 10.0, 100.0),
Vector3::new(0.0, -1.0, 0.0),
Vector3::new(0.0, 1.0, 0.0)
)
)
);
}
} | space >>
y: signed_float >>
space >> | random_line_split |
pbrt.rs | use std::str::{self, FromStr};
use nom::{digit, space};
use linalg::vector::Vector3;
use linalg::transform::Transform;
named!(unsigned_float<f64>, map_res!(
map_res!(
recognize!(
alt_complete!(
delimited!(digit, tag!("."), opt!(complete!(digit))) |
delimited!(opt!(digit), tag!("."), digit) |
digit
)
),
str::from_utf8
),
FromStr::from_str
));
named!(signed_float<f64>, map!(
pair!(
opt!(alt!(tag!("+") | tag!("-"))),
unsigned_float
),
|(sign, value): (Option<&[u8]>, f64)| {
sign.and_then(
|s| if s[0] == ('-' as u8) { Some(-1f64) } else { None }
).unwrap_or(1f64) * value
}
));
named!(vector3<Vector3>, do_parse!(
x: signed_float >>
space >>
y: signed_float >>
space >>
z: signed_float >>
(Vector3::new(x, y, z))
));
named!(look_at<Transform>, do_parse!(
tag!("LookAt") >>
space >>
eye: vector3 >>
space >>
look: vector3 >>
space >>
up: vector3 >>
(Transform::look_at(eye, look, up))
));
named!(transformation<Transform>, alt!(
look_at
));
#[cfg(test)]
mod tests {
use nom::IResult::Done;
use super::*;
#[test]
fn test_unsigned_float() {
assert_eq!(unsigned_float(&b"134"[..]), Done(&b""[..], 134.0));
assert_eq!(unsigned_float(&b"1234."[..]), Done(&b""[..], 1234.0));
assert_eq!(unsigned_float(&b"0.145"[..]), Done(&b""[..], 0.145));
assert_eq!(unsigned_float(&b"2.45"[..]), Done(&b""[..], 2.45));
}
#[test]
fn test_signed_float() {
assert_eq!(signed_float(&b"134"[..]), Done(&b""[..], 134.0));
assert_eq!(signed_float(&b"-1234."[..]), Done(&b""[..], -1234.0));
assert_eq!(signed_float(&b"0.145"[..]), Done(&b""[..], 0.145));
assert_eq!(signed_float(&b"-2.45"[..]), Done(&b""[..], -2.45));
}
#[test]
fn test_vector3() {
assert_eq!(vector3(&b"0.0 10.0 100.0"[..]),
Done(&b""[..], Vector3::new(0.0, 10.0, 100.0)));
assert_eq!(vector3(&b"0 10 100"[..]),
Done(&b""[..], Vector3::new(0.0, 10.0, 100.0)));
assert_eq!(vector3(&b"0 -10 100"[..]),
Done(&b""[..], Vector3::new(0.0, -10.0, 100.0)));
}
#[test]
#[ignore]
fn test_look_at() |
}
| {
assert_eq!(
look_at(&b"LookAt 0 10 100 0 -1 0 0 1 0"[..]),
Done(
&b""[..],
Transform::look_at(
Vector3::new(0.0, 10.0, 100.0),
Vector3::new(0.0, -1.0, 0.0),
Vector3::new(0.0, 1.0, 0.0)
)
)
);
} | identifier_body |
plane.rs | // Copyright GFX Developers 2014-2017
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use Vertex;
use super::Quad;
use super::generators::{SharedVertex, IndexedPolygon};
/// Represents a 2D plane with origin of (0, 0), from 1 to -1
#[derive(Clone, Copy)]
pub struct Plane {
subdivide_x: usize,
subdivide_y: usize,
x: usize,
y: usize,
}
impl Plane {
/// create a new plane generator
pub fn new() -> Plane {
Plane {
subdivide_x: 1,
subdivide_y: 1,
x: 0,
y: 0,
}
}
/// create a subdivided plane. This can be used to build
/// a grid of points.
/// `x` is the number of subdivisions in the x axis
/// `y` is the number of subdivisions in the y axis
pub fn subdivide(x: usize, y: usize) -> Plane {
assert!(x > 0 && y > 0);
Plane {
subdivide_x: x,
subdivide_y: y,
x: 0,
y: 0,
}
}
fn vert(&self, x: usize, y: usize) -> Vertex {
let sx = self.subdivide_x as f32;
let sy = self.subdivide_y as f32;
let x = (2. / sx) * x as f32 - 1.;
let y = (2. / sy) * y as f32 - 1.;
Vertex {
pos: [x, y, 0.0],
normal: [0., 0., 1.],
}
}
}
impl Iterator for Plane {
type Item = Quad<Vertex>;
fn size_hint(&self) -> (usize, Option<usize>) {
let n = (self.subdivide_y - self.y) * self.subdivide_x + (self.subdivide_x - self.x);
(n, Some(n))
}
fn next(&mut self) -> Option<Quad<Vertex>> {
if self.x == self.subdivide_x {
self.y += 1;
if self.y >= self.subdivide_y |
self.x = 0;
}
let x = self.vert(self.x, self.y);
let y = self.vert(self.x + 1, self.y);
let z = self.vert(self.x + 1, self.y + 1);
let w = self.vert(self.x, self.y + 1);
self.x += 1;
Some(Quad::new(x, y, z, w))
}
}
impl SharedVertex<Vertex> for Plane {
fn shared_vertex(&self, idx: usize) -> Vertex {
let y = idx / (self.subdivide_x + 1);
let x = idx % (self.subdivide_x + 1);
self.vert(x, y)
}
fn shared_vertex_count(&self) -> usize {
(self.subdivide_x + 1) * (self.subdivide_y + 1)
}
}
impl IndexedPolygon<Quad<usize>> for Plane {
fn indexed_polygon(&self, idx: usize) -> Quad<usize> {
let y = idx / self.subdivide_x;
let x = idx % self.subdivide_x;
let base = y * (self.subdivide_x + 1) + x;
Quad::new(base,
base + 1,
base + self.subdivide_x + 2,
base + self.subdivide_x + 1)
}
fn indexed_polygon_count(&self) -> usize {
self.subdivide_x * self.subdivide_y
}
}
#[test]
fn test_shared_vertex_count() {
let plane = Plane::new();
assert_eq!(plane.shared_vertex_count(), 4);
assert_eq!(plane.indexed_polygon_count(), 1);
let plane = Plane::subdivide(2, 2);
assert_eq!(plane.shared_vertex_count(), 9);
assert_eq!(plane.indexed_polygon_count(), 4);
let plane = Plane::subdivide(4, 4);
assert_eq!(plane.shared_vertex_count(), 25);
assert_eq!(plane.indexed_polygon_count(), 16);
}
| {
return None;
} | conditional_block |
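// Illustrative sketch, not part of the original plane.rs above: iterating a
// subdivided `Plane` yields one `Quad<Vertex>` per grid cell with positions
// in [-1, 1] on both axes, so a 4x4 subdivision produces 16 quads over a
// 5x5 grid of shared vertices. The function name is hypothetical.
fn example_plane_grid() {
    let plane = Plane::subdivide(4, 4);
    assert_eq!(plane.shared_vertex_count(), 25);
    let quads: Vec<Quad<Vertex>> = plane.collect();
    assert_eq!(quads.len(), 16);
}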
plane.rs | // Copyright GFX Developers 2014-2017
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use Vertex;
use super::Quad;
use super::generators::{SharedVertex, IndexedPolygon};
/// Represents a 2D plane with origin of (0, 0), from 1 to -1
#[derive(Clone, Copy)]
pub struct Plane {
subdivide_x: usize,
subdivide_y: usize,
x: usize,
y: usize,
}
impl Plane {
/// create a new plane generator
pub fn new() -> Plane {
Plane {
subdivide_x: 1,
subdivide_y: 1,
x: 0,
y: 0,
}
}
/// create a subdivided plane. This can be used to build
/// a grid of points.
/// `x` is the number of subdivisions in the x axis
/// `y` is the number of subdivisions in the y axis
pub fn subdivide(x: usize, y: usize) -> Plane {
assert!(x > 0 && y > 0);
Plane {
subdivide_x: x,
subdivide_y: y,
x: 0,
y: 0,
}
}
fn vert(&self, x: usize, y: usize) -> Vertex {
let sx = self.subdivide_x as f32;
let sy = self.subdivide_y as f32;
let x = (2. / sx) * x as f32 - 1.;
let y = (2. / sy) * y as f32 - 1.;
Vertex {
pos: [x, y, 0.0],
normal: [0., 0., 1.],
}
}
}
impl Iterator for Plane {
type Item = Quad<Vertex>;
fn size_hint(&self) -> (usize, Option<usize>) {
let n = (self.subdivide_y - self.y) * self.subdivide_x + (self.subdivide_x - self.x);
(n, Some(n))
}
fn next(&mut self) -> Option<Quad<Vertex>> {
if self.x == self.subdivide_x {
self.y += 1;
if self.y >= self.subdivide_y {
return None;
}
self.x = 0;
}
let x = self.vert(self.x, self.y);
let y = self.vert(self.x + 1, self.y);
let z = self.vert(self.x + 1, self.y + 1);
let w = self.vert(self.x, self.y + 1);
self.x += 1;
Some(Quad::new(x, y, z, w))
}
}
impl SharedVertex<Vertex> for Plane {
fn shared_vertex(&self, idx: usize) -> Vertex {
let y = idx / (self.subdivide_x + 1);
let x = idx % (self.subdivide_x + 1);
self.vert(x, y)
}
fn shared_vertex_count(&self) -> usize {
(self.subdivide_x + 1) * (self.subdivide_y + 1)
}
}
impl IndexedPolygon<Quad<usize>> for Plane {
fn indexed_polygon(&self, idx: usize) -> Quad<usize> {
let y = idx / self.subdivide_x;
let x = idx % self.subdivide_x;
let base = y * (self.subdivide_x + 1) + x;
Quad::new(base,
base + 1,
base + self.subdivide_x + 2,
base + self.subdivide_x + 1)
}
fn indexed_polygon_count(&self) -> usize |
}
#[test]
fn test_shared_vertex_count() {
let plane = Plane::new();
assert_eq!(plane.shared_vertex_count(), 4);
assert_eq!(plane.indexed_polygon_count(), 1);
let plane = Plane::subdivide(2, 2);
assert_eq!(plane.shared_vertex_count(), 9);
assert_eq!(plane.indexed_polygon_count(), 4);
let plane = Plane::subdivide(4, 4);
assert_eq!(plane.shared_vertex_count(), 25);
assert_eq!(plane.indexed_polygon_count(), 16);
}
| {
self.subdivide_x * self.subdivide_y
} | identifier_body |
plane.rs | // Copyright GFX Developers 2014-2017
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use Vertex;
use super::Quad;
use super::generators::{SharedVertex, IndexedPolygon};
/// Represents a 2D plane with origin of (0, 0), from 1 to -1
#[derive(Clone, Copy)]
pub struct Plane {
subdivide_x: usize,
subdivide_y: usize,
x: usize,
y: usize,
}
impl Plane {
/// create a new plane generator
pub fn new() -> Plane {
Plane {
subdivide_x: 1,
subdivide_y: 1,
x: 0,
y: 0,
}
}
/// create a subdivided plane. This can be used to build
/// a grid of points.
/// `x` is the number of subdivisions in the x axis
/// `y` is the number of subdivisions in the y axis
pub fn subdivide(x: usize, y: usize) -> Plane {
assert!(x > 0 && y > 0);
Plane {
subdivide_x: x,
subdivide_y: y,
x: 0,
y: 0,
}
}
fn vert(&self, x: usize, y: usize) -> Vertex {
let sx = self.subdivide_x as f32;
let sy = self.subdivide_y as f32;
let x = (2. / sx) * x as f32 - 1.;
let y = (2. / sy) * y as f32 - 1.;
Vertex {
pos: [x, y, 0.0],
normal: [0., 0., 1.],
}
}
}
impl Iterator for Plane {
type Item = Quad<Vertex>;
fn size_hint(&self) -> (usize, Option<usize>) {
let n = (self.subdivide_y - self.y) * self.subdivide_x + (self.subdivide_x - self.x);
(n, Some(n))
}
fn next(&mut self) -> Option<Quad<Vertex>> {
if self.x == self.subdivide_x {
self.y += 1;
if self.y >= self.subdivide_y {
return None;
}
self.x = 0;
}
let x = self.vert(self.x, self.y);
let y = self.vert(self.x + 1, self.y);
let z = self.vert(self.x + 1, self.y + 1);
let w = self.vert(self.x, self.y + 1);
self.x += 1;
Some(Quad::new(x, y, z, w))
}
}
impl SharedVertex<Vertex> for Plane {
fn shared_vertex(&self, idx: usize) -> Vertex {
let y = idx / (self.subdivide_x + 1);
let x = idx % (self.subdivide_x + 1);
self.vert(x, y)
}
fn shared_vertex_count(&self) -> usize {
(self.subdivide_x + 1) * (self.subdivide_y + 1)
}
}
impl IndexedPolygon<Quad<usize>> for Plane {
fn indexed_polygon(&self, idx: usize) -> Quad<usize> {
let y = idx / self.subdivide_x;
let x = idx % self.subdivide_x;
let base = y * (self.subdivide_x + 1) + x;
Quad::new(base,
base + 1,
base + self.subdivide_x + 2,
base + self.subdivide_x + 1)
}
fn | (&self) -> usize {
self.subdivide_x * self.subdivide_y
}
}
#[test]
fn test_shared_vertex_count() {
let plane = Plane::new();
assert_eq!(plane.shared_vertex_count(), 4);
assert_eq!(plane.indexed_polygon_count(), 1);
let plane = Plane::subdivide(2, 2);
assert_eq!(plane.shared_vertex_count(), 9);
assert_eq!(plane.indexed_polygon_count(), 4);
let plane = Plane::subdivide(4, 4);
assert_eq!(plane.shared_vertex_count(), 25);
assert_eq!(plane.indexed_polygon_count(), 16);
}
| indexed_polygon_count | identifier_name |
plane.rs | // Copyright GFX Developers 2014-2017
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use Vertex;
use super::Quad;
use super::generators::{SharedVertex, IndexedPolygon};
/// Represents a 2D plane with origin of (0, 0), from 1 to -1
#[derive(Clone, Copy)]
pub struct Plane {
subdivide_x: usize,
subdivide_y: usize,
x: usize,
y: usize,
}
impl Plane {
/// create a new plane generator
pub fn new() -> Plane {
Plane {
subdivide_x: 1,
subdivide_y: 1,
x: 0,
y: 0,
}
}
/// create a subdivided plane. This can be used to build
/// a grid of points.
/// `x` is the number of subdivisions in the x axis
/// `y` is the number of subdivisions in the y axis
pub fn subdivide(x: usize, y: usize) -> Plane {
assert!(x > 0 && y > 0);
Plane {
subdivide_x: x,
subdivide_y: y,
x: 0,
y: 0,
}
}
fn vert(&self, x: usize, y: usize) -> Vertex {
let sx = self.subdivide_x as f32;
let sy = self.subdivide_y as f32;
let x = (2. / sx) * x as f32 - 1.;
let y = (2. / sy) * y as f32 - 1.;
Vertex {
pos: [x, y, 0.0],
normal: [0., 0., 1.],
}
}
}
impl Iterator for Plane {
type Item = Quad<Vertex>;
fn size_hint(&self) -> (usize, Option<usize>) {
let n = (self.subdivide_y - self.y) * self.subdivide_x + (self.subdivide_x - self.x);
(n, Some(n))
}
fn next(&mut self) -> Option<Quad<Vertex>> {
if self.x == self.subdivide_x {
self.y += 1;
if self.y >= self.subdivide_y {
return None;
}
self.x = 0;
}
let x = self.vert(self.x, self.y); |
Some(Quad::new(x, y, z, w))
}
}
impl SharedVertex<Vertex> for Plane {
fn shared_vertex(&self, idx: usize) -> Vertex {
let y = idx / (self.subdivide_x + 1);
let x = idx % (self.subdivide_x + 1);
self.vert(x, y)
}
fn shared_vertex_count(&self) -> usize {
(self.subdivide_x + 1) * (self.subdivide_y + 1)
}
}
impl IndexedPolygon<Quad<usize>> for Plane {
fn indexed_polygon(&self, idx: usize) -> Quad<usize> {
let y = idx / self.subdivide_x;
let x = idx % self.subdivide_x;
let base = y * (self.subdivide_x + 1) + x;
Quad::new(base,
base + 1,
base + self.subdivide_x + 2,
base + self.subdivide_x + 1)
}
fn indexed_polygon_count(&self) -> usize {
self.subdivide_x * self.subdivide_y
}
}
#[test]
fn test_shared_vertex_count() {
let plane = Plane::new();
assert_eq!(plane.shared_vertex_count(), 4);
assert_eq!(plane.indexed_polygon_count(), 1);
let plane = Plane::subdivide(2, 2);
assert_eq!(plane.shared_vertex_count(), 9);
assert_eq!(plane.indexed_polygon_count(), 4);
let plane = Plane::subdivide(4, 4);
assert_eq!(plane.shared_vertex_count(), 25);
assert_eq!(plane.indexed_polygon_count(), 16);
} | let y = self.vert(self.x + 1, self.y);
let z = self.vert(self.x + 1, self.y + 1);
let w = self.vert(self.x, self.y + 1);
self.x += 1; | random_line_split |
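// Illustrative sketch, not part of the original plane.rs above: a worked
// example of the `indexed_polygon` arithmetic. For `Plane::subdivide(2, 2)`
// the shared vertices form a 3x3 grid, so cell index 3 (x = 1, y = 1) has
// base = 1 * (2 + 1) + 1 = 4 and its quad uses vertex indices (4, 5, 8, 7).
fn example_plane_indexing() {
    let plane = Plane::subdivide(2, 2);
    assert_eq!(plane.shared_vertex_count(), 9);
    assert_eq!(plane.indexed_polygon_count(), 4);
    // indexed_polygon(3) is expected to reference vertices 4, 5, 8 and 7,
    // matching the base computation shown in `indexed_polygon` above.
    let _cell = plane.indexed_polygon(3);
}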
union-ub-fat-ptr.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused)]
#![allow(const_err)] // make sure we cannot allow away the errors tested here
// normalize-stderr-test "alignment \d+" -> "alignment N"
// normalize-stderr-test "offset \d+" -> "offset N"
// normalize-stderr-test "allocation \d+" -> "allocation N"
// normalize-stderr-test "size \d+" -> "size N"
union BoolTransmute {
val: u8,
bl: bool,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct SliceRepr {
ptr: *const u8,
len: usize,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct BadSliceRepr {
ptr: *const u8,
len: &'static u8,
}
union SliceTransmute {
repr: SliceRepr,
bad: BadSliceRepr,
slice: &'static [u8],
str: &'static str,
my_str: &'static MyStr,
my_slice: &'static MySliceBool,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct DynRepr {
ptr: *const u8,
vtable: *const u8,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct DynRepr2 {
ptr: *const u8,
vtable: *const u64,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct BadDynRepr {
ptr: *const u8,
vtable: usize,
}
union DynTransmute {
repr: DynRepr,
repr2: DynRepr2,
bad: BadDynRepr,
rust: &'static Trait,
}
trait Trait {}
impl Trait for bool {}
// custom unsized type
struct MyStr(str);
// custom unsized type with sized fields
struct MySlice<T: ?Sized>(bool, T);
type MySliceBool = MySlice<[bool]>;
// OK
const A: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.str};
// bad str
const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str};
//~^ ERROR it is undefined behavior to use this value
// bad str
const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str};
//~^ ERROR it is undefined behavior to use this value
// bad str in user-defined unsized type
const C2: &MyStr = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str};
//~^ ERROR it is undefined behavior to use this value
// OK
const A2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.slice};
// bad slice
const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice};
//~^ ERROR it is undefined behavior to use this value
// bad slice
const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad data *inside* the trait object
const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl };
//~^ ERROR it is undefined behavior to use this value
// bad data *inside* the slice
const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }];
//~^ ERROR it is undefined behavior to use this value
// good MySliceBool
const I1: &MySliceBool = &MySlice(true, [false]);
// bad: sized field is not okay
const I2: &MySliceBool = &MySlice(unsafe { BoolTransmute { val: 3 }.bl }, [false]);
//~^ ERROR it is undefined behavior to use this value
// bad: unsized part is not okay
const I3: &MySliceBool = &MySlice(true, [unsafe { BoolTransmute { val: 3 }.bl }]);
//~^ ERROR it is undefined behavior to use this value
// invalid UTF-8
const J1: &str = unsafe { SliceTransmute { slice: &[0xFF] }.str };
//~^ ERROR it is undefined behavior to use this value
// invalid UTF-8 in user-defined str-like | //~^ ERROR it is undefined behavior to use this value
fn main() {
} | const J2: &MyStr = unsafe { SliceTransmute { slice: &[0xFF] }.my_str }; | random_line_split |
union-ub-fat-ptr.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused)]
#![allow(const_err)] // make sure we cannot allow away the errors tested here
// normalize-stderr-test "alignment \d+" -> "alignment N"
// normalize-stderr-test "offset \d+" -> "offset N"
// normalize-stderr-test "allocation \d+" -> "allocation N"
// normalize-stderr-test "size \d+" -> "size N"
union BoolTransmute {
val: u8,
bl: bool,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct SliceRepr {
ptr: *const u8,
len: usize,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct | {
ptr: *const u8,
len: &'static u8,
}
union SliceTransmute {
repr: SliceRepr,
bad: BadSliceRepr,
slice: &'static [u8],
str: &'static str,
my_str: &'static MyStr,
my_slice: &'static MySliceBool,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct DynRepr {
ptr: *const u8,
vtable: *const u8,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct DynRepr2 {
ptr: *const u8,
vtable: *const u64,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct BadDynRepr {
ptr: *const u8,
vtable: usize,
}
union DynTransmute {
repr: DynRepr,
repr2: DynRepr2,
bad: BadDynRepr,
rust: &'static Trait,
}
trait Trait {}
impl Trait for bool {}
// custom unsized type
struct MyStr(str);
// custom unsized type with sized fields
struct MySlice<T: ?Sized>(bool, T);
type MySliceBool = MySlice<[bool]>;
// OK
const A: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.str};
// bad str
const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str};
//~^ ERROR it is undefined behavior to use this value
// bad str
const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str};
//~^ ERROR it is undefined behavior to use this value
// bad str in user-defined unsized type
const C2: &MyStr = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str};
//~^ ERROR it is undefined behavior to use this value
// OK
const A2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.slice};
// bad slice
const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice};
//~^ ERROR it is undefined behavior to use this value
// bad slice
const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad data *inside* the trait object
const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl };
//~^ ERROR it is undefined behavior to use this value
// bad data *inside* the slice
const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }];
//~^ ERROR it is undefined behavior to use this value
// good MySliceBool
const I1: &MySliceBool = &MySlice(true, [false]);
// bad: sized field is not okay
const I2: &MySliceBool = &MySlice(unsafe { BoolTransmute { val: 3 }.bl }, [false]);
//~^ ERROR it is undefined behavior to use this value
// bad: unsized part is not okay
const I3: &MySliceBool = &MySlice(true, [unsafe { BoolTransmute { val: 3 }.bl }]);
//~^ ERROR it is undefined behavior to use this value
// invalid UTF-8
const J1: &str = unsafe { SliceTransmute { slice: &[0xFF] }.str };
//~^ ERROR it is undefined behavior to use this value
// invalid UTF-8 in user-defined str-like
const J2: &MyStr = unsafe { SliceTransmute { slice: &[0xFF] }.my_str };
//~^ ERROR it is undefined behavior to use this value
fn main() {
}
| BadSliceRepr | identifier_name |
union-ub-fat-ptr.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused)]
#![allow(const_err)] // make sure we cannot allow away the errors tested here
// normalize-stderr-test "alignment \d+" -> "alignment N"
// normalize-stderr-test "offset \d+" -> "offset N"
// normalize-stderr-test "allocation \d+" -> "allocation N"
// normalize-stderr-test "size \d+" -> "size N"
union BoolTransmute {
val: u8,
bl: bool,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct SliceRepr {
ptr: *const u8,
len: usize,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct BadSliceRepr {
ptr: *const u8,
len: &'static u8,
}
union SliceTransmute {
repr: SliceRepr,
bad: BadSliceRepr,
slice: &'static [u8],
str: &'static str,
my_str: &'static MyStr,
my_slice: &'static MySliceBool,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct DynRepr {
ptr: *const u8,
vtable: *const u8,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct DynRepr2 {
ptr: *const u8,
vtable: *const u64,
}
#[repr(C)]
#[derive(Copy, Clone)]
struct BadDynRepr {
ptr: *const u8,
vtable: usize,
}
union DynTransmute {
repr: DynRepr,
repr2: DynRepr2,
bad: BadDynRepr,
rust: &'static Trait,
}
trait Trait {}
impl Trait for bool {}
// custom unsized type
struct MyStr(str);
// custom unsized type with sized fields
struct MySlice<T: ?Sized>(bool, T);
type MySliceBool = MySlice<[bool]>;
// OK
const A: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.str};
// bad str
const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str};
//~^ ERROR it is undefined behavior to use this value
// bad str
const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str};
//~^ ERROR it is undefined behavior to use this value
// bad str in user-defined unsized type
const C2: &MyStr = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str};
//~^ ERROR it is undefined behavior to use this value
// OK
const A2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.slice};
// bad slice
const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice};
//~^ ERROR it is undefined behavior to use this value
// bad slice
const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad trait object
const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
//~^ ERROR it is undefined behavior to use this value
// bad data *inside* the trait object
const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl };
//~^ ERROR it is undefined behavior to use this value
// bad data *inside* the slice
const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }];
//~^ ERROR it is undefined behavior to use this value
// good MySliceBool
const I1: &MySliceBool = &MySlice(true, [false]);
// bad: sized field is not okay
const I2: &MySliceBool = &MySlice(unsafe { BoolTransmute { val: 3 }.bl }, [false]);
//~^ ERROR it is undefined behavior to use this value
// bad: unsized part is not okay
const I3: &MySliceBool = &MySlice(true, [unsafe { BoolTransmute { val: 3 }.bl }]);
//~^ ERROR it is undefined behavior to use this value
// invalid UTF-8
const J1: &str = unsafe { SliceTransmute { slice: &[0xFF] }.str };
//~^ ERROR it is undefined behavior to use this value
// invalid UTF-8 in user-defined str-like
const J2: &MyStr = unsafe { SliceTransmute { slice: &[0xFF] }.my_str };
//~^ ERROR it is undefined behavior to use this value
fn main() | {
} | identifier_body |
|
xoshiro128plusplus.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
use rand_core::impls::{next_u64_via_u32, fill_bytes_via_next};
use rand_core::le::read_u32_into;
use rand_core::{SeedableRng, RngCore, Error};
/// A xoshiro128++ random number generator.
///
/// The xoshiro128++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro128plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro128plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct Xoshiro128PlusPlus {
s: [u32; 4],
}
impl SeedableRng for Xoshiro128PlusPlus {
type Seed = [u8; 16];
/// Create a new `Xoshiro128PlusPlus`. If `seed` is entirely 0, it will be
/// mapped to a different seed.
#[inline]
fn from_seed(seed: [u8; 16]) -> Xoshiro128PlusPlus {
if seed.iter().all(|&x| x == 0) {
return Self::seed_from_u64(0);
}
let mut state = [0; 4];
read_u32_into(&seed, &mut state);
Xoshiro128PlusPlus { s: state }
}
/// Create a new `Xoshiro128PlusPlus` from a `u64` seed.
///
/// This uses the SplitMix64 generator internally.
fn seed_from_u64(mut state: u64) -> Self {
const PHI: u64 = 0x9e3779b97f4a7c15;
let mut seed = Self::Seed::default();
for chunk in seed.as_mut().chunks_mut(8) {
state = state.wrapping_add(PHI);
let mut z = state;
z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
z = z ^ (z >> 31);
chunk.copy_from_slice(&z.to_le_bytes());
}
Self::from_seed(seed)
}
}
impl RngCore for Xoshiro128PlusPlus {
#[inline]
fn next_u32(&mut self) -> u32 {
let result_starstar = self.s[0]
.wrapping_add(self.s[3])
.rotate_left(7)
.wrapping_add(self.s[0]);
let t = self.s[1] << 9;
self.s[2] ^= self.s[0];
self.s[3] ^= self.s[1];
self.s[1] ^= self.s[2];
self.s[0] ^= self.s[3];
self.s[2] ^= t;
self.s[3] = self.s[3].rotate_left(11);
result_starstar
}
#[inline]
fn | (&mut self) -> u64 {
next_u64_via_u32(self)
}
#[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
fill_bytes_via_next(self, dest);
}
#[inline]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reference() {
let mut rng = Xoshiro128PlusPlus::from_seed(
[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
// These values were produced with the reference implementation:
// http://xoshiro.di.unimi.it/xoshiro128plusplus.c
let expected = [
641, 1573767, 3222811527, 3517856514, 836907274, 4247214768,
3867114732, 1355841295, 495546011, 621204420,
];
for &e in &expected {
assert_eq!(rng.next_u32(), e);
}
}
}
| next_u64 | identifier_name |
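// Illustrative sketch, not part of the original file above: because
// `Xoshiro128PlusPlus` implements `RngCore`, whole byte buffers can be
// filled directly, and `next_u64` is assembled from two `next_u32` calls
// via `next_u64_via_u32`. The seed and buffer size here are arbitrary.
fn example_fill_bytes() {
    let mut rng = Xoshiro128PlusPlus::seed_from_u64(42);
    let mut buf = [0u8; 16];
    rng.fill_bytes(&mut buf);
    let _word = rng.next_u64();
}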
xoshiro128plusplus.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
use rand_core::impls::{next_u64_via_u32, fill_bytes_via_next};
use rand_core::le::read_u32_into;
use rand_core::{SeedableRng, RngCore, Error};
/// A xoshiro128++ random number generator.
///
/// The xoshiro128++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro128plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro128plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct Xoshiro128PlusPlus {
s: [u32; 4],
}
impl SeedableRng for Xoshiro128PlusPlus {
type Seed = [u8; 16];
/// Create a new `Xoshiro128PlusPlus`. If `seed` is entirely 0, it will be
/// mapped to a different seed.
#[inline]
fn from_seed(seed: [u8; 16]) -> Xoshiro128PlusPlus {
if seed.iter().all(|&x| x == 0) {
return Self::seed_from_u64(0);
}
let mut state = [0; 4];
read_u32_into(&seed, &mut state);
Xoshiro128PlusPlus { s: state }
}
/// Create a new `Xoshiro128PlusPlus` from a `u64` seed.
///
/// This uses the SplitMix64 generator internally.
fn seed_from_u64(mut state: u64) -> Self {
const PHI: u64 = 0x9e3779b97f4a7c15;
let mut seed = Self::Seed::default();
for chunk in seed.as_mut().chunks_mut(8) {
state = state.wrapping_add(PHI);
let mut z = state;
z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
z = z ^ (z >> 31);
chunk.copy_from_slice(&z.to_le_bytes());
}
Self::from_seed(seed)
}
}
impl RngCore for Xoshiro128PlusPlus {
#[inline]
fn next_u32(&mut self) -> u32 {
let result_starstar = self.s[0]
.wrapping_add(self.s[3])
.rotate_left(7)
.wrapping_add(self.s[0]);
let t = self.s[1] << 9;
self.s[2] ^= self.s[0];
self.s[3] ^= self.s[1];
self.s[1] ^= self.s[2];
self.s[0] ^= self.s[3];
self.s[2] ^= t;
self.s[3] = self.s[3].rotate_left(11);
result_starstar
}
#[inline]
fn next_u64(&mut self) -> u64 {
next_u64_via_u32(self)
}
#[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) |
#[inline]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reference() {
let mut rng = Xoshiro128PlusPlus::from_seed(
[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
// These values were produced with the reference implementation:
// http://xoshiro.di.unimi.it/xoshiro128plusplus.c
let expected = [
641, 1573767, 3222811527, 3517856514, 836907274, 4247214768,
3867114732, 1355841295, 495546011, 621204420,
];
for &e in &expected {
assert_eq!(rng.next_u32(), e);
}
}
}
| {
fill_bytes_via_next(self, dest);
} | identifier_body |
xoshiro128plusplus.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
use rand_core::impls::{next_u64_via_u32, fill_bytes_via_next};
use rand_core::le::read_u32_into;
use rand_core::{SeedableRng, RngCore, Error};
/// A xoshiro128++ random number generator.
///
/// The xoshiro128++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro128plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro128plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct Xoshiro128PlusPlus {
s: [u32; 4],
}
impl SeedableRng for Xoshiro128PlusPlus {
type Seed = [u8; 16];
/// Create a new `Xoshiro128PlusPlus`. If `seed` is entirely 0, it will be
/// mapped to a different seed.
#[inline]
fn from_seed(seed: [u8; 16]) -> Xoshiro128PlusPlus {
if seed.iter().all(|&x| x == 0) |
let mut state = [0; 4];
read_u32_into(&seed, &mut state);
Xoshiro128PlusPlus { s: state }
}
/// Create a new `Xoshiro128PlusPlus` from a `u64` seed.
///
/// This uses the SplitMix64 generator internally.
fn seed_from_u64(mut state: u64) -> Self {
const PHI: u64 = 0x9e3779b97f4a7c15;
let mut seed = Self::Seed::default();
for chunk in seed.as_mut().chunks_mut(8) {
state = state.wrapping_add(PHI);
let mut z = state;
z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
z = z ^ (z >> 31);
chunk.copy_from_slice(&z.to_le_bytes());
}
Self::from_seed(seed)
}
}
impl RngCore for Xoshiro128PlusPlus {
#[inline]
fn next_u32(&mut self) -> u32 {
let result_starstar = self.s[0]
.wrapping_add(self.s[3])
.rotate_left(7)
.wrapping_add(self.s[0]);
let t = self.s[1] << 9;
self.s[2] ^= self.s[0];
self.s[3] ^= self.s[1];
self.s[1] ^= self.s[2];
self.s[0] ^= self.s[3];
self.s[2] ^= t;
self.s[3] = self.s[3].rotate_left(11);
result_starstar
}
#[inline]
fn next_u64(&mut self) -> u64 {
next_u64_via_u32(self)
}
#[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
fill_bytes_via_next(self, dest);
}
#[inline]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reference() {
let mut rng = Xoshiro128PlusPlus::from_seed(
[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
// These values were produced with the reference implementation:
// http://xoshiro.di.unimi.it/xoshiro128plusplus.c
let expected = [
641, 1573767, 3222811527, 3517856514, 836907274, 4247214768,
3867114732, 1355841295, 495546011, 621204420,
];
for &e in &expected {
assert_eq!(rng.next_u32(), e);
}
}
}
| {
return Self::seed_from_u64(0);
} | conditional_block |
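// A small check of the zero-seed remapping shown in the conditional above:
// `from_seed` routes an all-zero seed through `seed_from_u64(0)`, so the two
// constructions agree. The module and test names are illustrative assumptions.
#[cfg(test)]
mod zero_seed_sketch {
    use super::*;

    #[test]
    fn zero_seed_is_remapped() {
        let from_bytes = Xoshiro128PlusPlus::from_seed([0u8; 16]);
        let from_u64 = Xoshiro128PlusPlus::seed_from_u64(0);
        // Derived PartialEq compares the four internal state words.
        assert_eq!(from_bytes, from_u64);
    }
}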
xoshiro128plusplus.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
use rand_core::impls::{next_u64_via_u32, fill_bytes_via_next};
use rand_core::le::read_u32_into;
use rand_core::{SeedableRng, RngCore, Error};
/// A xoshiro128++ random number generator.
///
/// The xoshiro128++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro128plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro128plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct Xoshiro128PlusPlus {
s: [u32; 4],
}
impl SeedableRng for Xoshiro128PlusPlus {
type Seed = [u8; 16];
/// Create a new `Xoshiro128PlusPlus`. If `seed` is entirely 0, it will be
/// mapped to a different seed.
#[inline]
fn from_seed(seed: [u8; 16]) -> Xoshiro128PlusPlus {
if seed.iter().all(|&x| x == 0) {
return Self::seed_from_u64(0);
}
let mut state = [0; 4];
read_u32_into(&seed, &mut state);
Xoshiro128PlusPlus { s: state }
}
/// Create a new `Xoshiro128PlusPlus` from a `u64` seed.
///
/// This uses the SplitMix64 generator internally.
fn seed_from_u64(mut state: u64) -> Self {
const PHI: u64 = 0x9e3779b97f4a7c15;
let mut seed = Self::Seed::default();
for chunk in seed.as_mut().chunks_mut(8) {
state = state.wrapping_add(PHI);
let mut z = state;
z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
z = z ^ (z >> 31);
chunk.copy_from_slice(&z.to_le_bytes());
}
Self::from_seed(seed)
}
}
impl RngCore for Xoshiro128PlusPlus {
#[inline]
fn next_u32(&mut self) -> u32 {
let result_starstar = self.s[0]
.wrapping_add(self.s[3])
.rotate_left(7)
.wrapping_add(self.s[0]);
let t = self.s[1] << 9;
self.s[2] ^= self.s[0];
self.s[3] ^= self.s[1];
self.s[1] ^= self.s[2];
self.s[0] ^= self.s[3];
self.s[2] ^= t;
self.s[3] = self.s[3].rotate_left(11);
result_starstar
}
#[inline]
fn next_u64(&mut self) -> u64 {
next_u64_via_u32(self)
}
#[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
fill_bytes_via_next(self, dest);
}
#[inline]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
} | mod tests {
use super::*;
#[test]
fn reference() {
let mut rng = Xoshiro128PlusPlus::from_seed(
[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
// These values were produced with the reference implementation:
// http://xoshiro.di.unimi.it/xoshiro128plusplus.c
let expected = [
641, 1573767, 3222811527, 3517856514, 836907274, 4247214768,
3867114732, 1355841295, 495546011, 621204420,
];
for &e in &expected {
assert_eq!(rng.next_u32(), e);
}
}
} | }
#[cfg(test)] | random_line_split |
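// How the delegating methods in the impl above compose, sketched as a free
// function. rand_core's `next_u64_via_u32` is documented as little-endian
// (the first u32 draw becomes the low half); the exact helper internals are
// an assumption here, and this function is illustrative, not part of the crate.
use rand_core::RngCore;

fn next_u64_from_two_u32<R: RngCore>(rng: &mut R) -> u64 {
    let lo = u64::from(rng.next_u32());
    let hi = u64::from(rng.next_u32());
    (hi << 32) | lo
}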
sr_masterctl.rs | // SairaDB - A distributed database
// Copyright (C) 2015 by Siyu Wang
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
use std::collections::HashMap;
use std::net::TcpStream;
use std::thread;
use std::sync::Arc;
use std::sync::mpsc::Sender;
use std::sync::atomic::AtomicUsize;
use super::libc;
pub fn init(masters: Vec<String>, vnodes: Vec<u64>,
map: &HashMap<String, String>, log_sender: Sender<String>) {
let vnodes = Arc::new(vnodes);
let cookie = map.get("cookie-master").unwrap().to_string();
let port = map.get("master-port").unwrap().to_string();
for master in masters {
let vnodes = vnodes.clone();
let log_sender = log_sender.clone();
let master = master.to_string();
let cookie = cookie.to_string();
let port = port.to_string();
let _ = thread::Builder::new().name(format!("master_task({})", master))
.spawn(|| {
master_task(master, port, vnodes, cookie, log_sender);
});
}
}
fn master_task(ip: String, port: String, vnodes: Arc<Vec<u64>>, cookie: String,
log_sender: Sender<String>) |
fn master_connection() {
}
| {
let addr: &str = &(ip + ":" + &port);
let count = Arc::new(AtomicUsize::new(0));
loop {
let stream = TcpStream::connect(addr);
//match stream.write_all(cookie.as_bytes());
}
} | identifier_body |
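// The `master_task` body above only dials the master in a loop; the cookie
// write is still commented out in the source. A hedged sketch of how that
// handshake might be completed with std's `Write` trait -- the function name,
// error handling, and lack of retry/backoff are assumptions, not SairaDB code.
use std::io::Write;
use std::net::TcpStream;

fn send_cookie(addr: &str, cookie: &str) -> std::io::Result<()> {
    let mut stream = TcpStream::connect(addr)?;
    // Authenticate this node by sending the shared master cookie first.
    stream.write_all(cookie.as_bytes())?;
    stream.flush()
}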
sr_masterctl.rs | // SairaDB - A distributed database
// Copyright (C) 2015 by Siyu Wang
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
use std::collections::HashMap;
use std::net::TcpStream;
use std::thread;
use std::sync::Arc;
use std::sync::mpsc::Sender;
use std::sync::atomic::AtomicUsize;
use super::libc;
pub fn init(masters: Vec<String>, vnodes: Vec<u64>,
map: &HashMap<String, String>, log_sender: Sender<String>) {
let vnodes = Arc::new(vnodes);
let cookie = map.get("cookie-master").unwrap().to_string();
let port = map.get("master-port").unwrap().to_string();
for master in masters {
let vnodes = vnodes.clone();
let log_sender = log_sender.clone();
let master = master.to_string();
let cookie = cookie.to_string();
let port = port.to_string();
let _ = thread::Builder::new().name(format!("master_task({})", master))
.spawn(|| {
master_task(master, port, vnodes, cookie, log_sender);
});
}
}
fn | (ip: String, port: String, vnodes: Arc<Vec<u64>>, cookie: String,
log_sender: Sender<String>) {
let addr: &str = &(ip + ":" + &port);
let count = Arc::new(AtomicUsize::new(0));
loop {
let stream = TcpStream::connect(addr);
//match stream.write_all(cookie.as_bytes());
}
}
fn master_connection() {
}
| master_task | identifier_name |