file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 values)
---|---|---|---|---|
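Each row below is one fill-in-the-middle (FIM) example: a source file is split so that `prefix` and `suffix` are kept verbatim and `middle` is the span to be predicted, with `fim_type` recording how the split was made (`random_line_split`, `identifier_name`, `identifier_body`, or `conditional_block`). As a minimal sketch of that schema, assuming only the five columns above, a row could be modelled in Rust as follows; the `FimRow` name and the `reassemble` helper are illustrative, not part of the dataset.

```rust
/// Illustrative model of one dataset row (type and method names are assumptions).
struct FimRow {
    file_name: String, // e.g. "test_fmt.rs"
    prefix: String,    // source text before the masked span
    suffix: String,    // source text after the masked span
    middle: String,    // the masked span itself; may be empty
    fim_type: String,  // "random_line_split" | "identifier_name" | "identifier_body" | "conditional_block"
}

impl FimRow {
    /// Splices the middle back between prefix and suffix to recover the original file contents.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}
```

For example, splicing the second row below (a `prefix` ending in `fn`, `middle` = `test_fmt_group`, and a `suffix` starting with `() {`) reproduces the original `fn test_fmt_group() {` line.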
test_fmt.rs
|
#![allow(clippy::from_iter_instead_of_collect)]
use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree};
use std::iter::{self, FromIterator};
#[test]
|
fn test_fmt_group() {
let ident = Ident::new("x", Span::call_site());
let inner = TokenStream::from_iter(iter::once(TokenTree::Ident(ident)));
let parens_empty = Group::new(Delimiter::Parenthesis, TokenStream::new());
let parens_nonempty = Group::new(Delimiter::Parenthesis, inner.clone());
let brackets_empty = Group::new(Delimiter::Bracket, TokenStream::new());
let brackets_nonempty = Group::new(Delimiter::Bracket, inner.clone());
let braces_empty = Group::new(Delimiter::Brace, TokenStream::new());
let braces_nonempty = Group::new(Delimiter::Brace, inner.clone());
let none_empty = Group::new(Delimiter::None, TokenStream::new());
let none_nonempty = Group::new(Delimiter::None, inner);
// Matches libproc_macro.
assert_eq!("()", parens_empty.to_string());
assert_eq!("(x)", parens_nonempty.to_string());
assert_eq!("[]", brackets_empty.to_string());
assert_eq!("[x]", brackets_nonempty.to_string());
assert_eq!("{ }", braces_empty.to_string());
assert_eq!("{ x }", braces_nonempty.to_string());
assert_eq!("", none_empty.to_string());
assert_eq!("x", none_nonempty.to_string());
}
|
random_line_split
|
|
test_fmt.rs
|
#![allow(clippy::from_iter_instead_of_collect)]
use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree};
use std::iter::{self, FromIterator};
#[test]
fn
|
() {
let ident = Ident::new("x", Span::call_site());
let inner = TokenStream::from_iter(iter::once(TokenTree::Ident(ident)));
let parens_empty = Group::new(Delimiter::Parenthesis, TokenStream::new());
let parens_nonempty = Group::new(Delimiter::Parenthesis, inner.clone());
let brackets_empty = Group::new(Delimiter::Bracket, TokenStream::new());
let brackets_nonempty = Group::new(Delimiter::Bracket, inner.clone());
let braces_empty = Group::new(Delimiter::Brace, TokenStream::new());
let braces_nonempty = Group::new(Delimiter::Brace, inner.clone());
let none_empty = Group::new(Delimiter::None, TokenStream::new());
let none_nonempty = Group::new(Delimiter::None, inner);
// Matches libproc_macro.
assert_eq!("()", parens_empty.to_string());
assert_eq!("(x)", parens_nonempty.to_string());
assert_eq!("[]", brackets_empty.to_string());
assert_eq!("[x]", brackets_nonempty.to_string());
assert_eq!("{ }", braces_empty.to_string());
assert_eq!("{ x }", braces_nonempty.to_string());
assert_eq!("", none_empty.to_string());
assert_eq!("x", none_nonempty.to_string());
}
|
test_fmt_group
|
identifier_name
|
test_fmt.rs
|
#![allow(clippy::from_iter_instead_of_collect)]
use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree};
use std::iter::{self, FromIterator};
#[test]
fn test_fmt_group()
|
assert_eq!("x", none_nonempty.to_string());
}
|
{
let ident = Ident::new("x", Span::call_site());
let inner = TokenStream::from_iter(iter::once(TokenTree::Ident(ident)));
let parens_empty = Group::new(Delimiter::Parenthesis, TokenStream::new());
let parens_nonempty = Group::new(Delimiter::Parenthesis, inner.clone());
let brackets_empty = Group::new(Delimiter::Bracket, TokenStream::new());
let brackets_nonempty = Group::new(Delimiter::Bracket, inner.clone());
let braces_empty = Group::new(Delimiter::Brace, TokenStream::new());
let braces_nonempty = Group::new(Delimiter::Brace, inner.clone());
let none_empty = Group::new(Delimiter::None, TokenStream::new());
let none_nonempty = Group::new(Delimiter::None, inner);
// Matches libproc_macro.
assert_eq!("()", parens_empty.to_string());
assert_eq!("(x)", parens_nonempty.to_string());
assert_eq!("[]", brackets_empty.to_string());
assert_eq!("[x]", brackets_nonempty.to_string());
assert_eq!("{ }", braces_empty.to_string());
assert_eq!("{ x }", braces_nonempty.to_string());
assert_eq!("", none_empty.to_string());
|
identifier_body
|
restyle_damage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The restyle damage is a hint that tells layout which kind of operations may
//! be needed in presence of incremental style changes.
#![deny(missing_docs)]
use computed_values::display;
use heapsize::HeapSizeOf;
use matching::{StyleChange, StyleDifference};
use properties::ComputedValues;
use std::fmt;
bitflags! {
#[doc = "Individual layout actions that may be necessary after restyling."]
pub flags ServoRestyleDamage: u8 {
#[doc = "Repaint the node itself."]
#[doc = "Currently unused; need to decide how this propagates."]
const REPAINT = 0x01,
#[doc = "The stacking-context-relative position of this node or its descendants has \
changed."]
#[doc = "Propagates both up and down the flow tree."]
const REPOSITION = 0x02,
#[doc = "Recompute the overflow regions (bounding box of object and all descendants)."]
#[doc = "Propagates down the flow tree because the computation is bottom-up."]
const STORE_OVERFLOW = 0x04,
#[doc = "Recompute intrinsic inline_sizes (minimum and preferred)."]
#[doc = "Propagates down the flow tree because the computation is"]
#[doc = "bottom-up."]
const BUBBLE_ISIZES = 0x08,
#[doc = "Recompute actual inline-sizes and block-sizes, only taking out-of-flow children \
into account. \
Propagates up the flow tree because the computation is top-down."]
const REFLOW_OUT_OF_FLOW = 0x10,
#[doc = "Recompute actual inline_sizes and block_sizes."]
#[doc = "Propagates up the flow tree because the computation is"]
#[doc = "top-down."]
const REFLOW = 0x20,
#[doc = "Re-resolve generated content. \
Propagates up the flow tree because the computation is inorder."]
const RESOLVE_GENERATED_CONTENT = 0x40,
#[doc = "The entire flow needs to be reconstructed."]
const RECONSTRUCT_FLOW = 0x80
}
}
impl HeapSizeOf for ServoRestyleDamage {
fn heap_size_of_children(&self) -> usize { 0 }
}
impl ServoRestyleDamage {
/// Compute the `StyleDifference` (including the appropriate restyle damage)
/// for a given style change between `old` and `new`.
pub fn compute_style_difference(_source: &ComputedValues,
old: &ComputedValues,
new: &ComputedValues)
-> StyleDifference {
let damage = compute_damage(old, new);
let change = if damage.is_empty() { StyleChange::Unchanged } else { StyleChange::Changed };
StyleDifference::new(damage, change)
}
/// Computes the `StyleDifference` between the two `ComputedValues` objects
/// for the case where the old and new style are both `display: none`.
///
/// For Servo we never need to generate any damage for such elements.
pub fn compute_undisplayed_style_difference(
_old_style: &ComputedValues,
_new_style: &ComputedValues,
) -> StyleDifference {
StyleDifference::new(Self::empty(), StyleChange::Unchanged)
}
/// Returns a bitmask that represents a flow that needs to be rebuilt and
/// reflowed.
///
/// FIXME(bholley): Do we ever actually need this? Shouldn't
/// RECONSTRUCT_FLOW imply everything else?
pub fn rebuild_and_reflow() -> ServoRestyleDamage {
REPAINT | REPOSITION | STORE_OVERFLOW | BUBBLE_ISIZES | REFLOW_OUT_OF_FLOW | REFLOW |
RECONSTRUCT_FLOW
}
/// Returns a bitmask indicating that the frame needs to be reconstructed.
pub fn reconstruct() -> ServoRestyleDamage {
RECONSTRUCT_FLOW
}
/// Supposing a flow has the given `position` property and this damage,
/// returns the damage that we should add to the *parent* of this flow.
pub fn damage_for_parent(self, child_is_absolutely_positioned: bool) -> ServoRestyleDamage {
if child_is_absolutely_positioned {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
} else {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
}
}
/// Supposing the *parent* of a flow with the given `position` property has
/// this damage, returns the damage that we should add to this flow.
pub fn damage_for_child(self,
parent_is_absolutely_positioned: bool,
child_is_absolutely_positioned: bool)
-> ServoRestyleDamage {
match (parent_is_absolutely_positioned, child_is_absolutely_positioned) {
(false, true) => {
// Absolute children are out-of-flow and therefore insulated from changes.
//
// FIXME(pcwalton): Au contraire, if the containing block dimensions change!
self & (REPAINT | REPOSITION)
}
(true, false) => {
// Changing the position of an absolutely-positioned block requires us to reflow
// its kids.
if self.contains(REFLOW_OUT_OF_FLOW) {
self | REFLOW
} else
|
}
_ => {
// TODO(pcwalton): Take floatedness into account.
self & (REPAINT | REPOSITION | REFLOW)
}
}
}
/// Servo doesn't implement this optimization.
pub fn handled_for_descendants(self) -> Self {
Self::empty()
}
}
impl Default for ServoRestyleDamage {
fn default() -> Self {
Self::empty()
}
}
impl fmt::Display for ServoRestyleDamage {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let mut first_elem = true;
let to_iter =
[ (REPAINT, "Repaint")
, (REPOSITION, "Reposition")
, (STORE_OVERFLOW, "StoreOverflow")
, (BUBBLE_ISIZES, "BubbleISizes")
, (REFLOW_OUT_OF_FLOW, "ReflowOutOfFlow")
, (REFLOW, "Reflow")
, (RESOLVE_GENERATED_CONTENT, "ResolveGeneratedContent")
, (RECONSTRUCT_FLOW, "ReconstructFlow")
];
for &(damage, damage_str) in &to_iter {
if self.contains(damage) {
if !first_elem { write!(f, " | ")?; }
write!(f, "{}", damage_str)?;
first_elem = false;
}
}
if first_elem {
write!(f, "NoDamage")?;
}
Ok(())
}
}
// NB: We need the braces inside the RHS due to Rust #8012. This particular
// version of this macro might be safe anyway, but we want to avoid silent
// breakage on modifications.
macro_rules! add_if_not_equal(
($old:ident, $new:ident, $damage:ident,
[ $($effect:ident),* ], [ $($style_struct_getter:ident.$name:ident),* ]) => ({
if $( ($old.$style_struct_getter().$name != $new.$style_struct_getter().$name) )||* {
$damage.insert($($effect)|*);
true
} else {
false
}
})
);
fn compute_damage(old: &ComputedValues, new: &ComputedValues) -> ServoRestyleDamage {
let mut damage = ServoRestyleDamage::empty();
// This should check every CSS property, as enumerated in the fields of
// http://doc.servo.org/style/properties/struct.ComputedValues.html
// FIXME: Test somehow that every property is included.
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES, REFLOW_OUT_OF_FLOW,
REFLOW, RECONSTRUCT_FLOW], [
get_box.clear, get_box.float, get_box.display, get_box.position, get_counters.content,
get_counters.counter_reset, get_counters.counter_increment,
get_inheritedbox._servo_under_display_none,
get_list.quotes, get_list.list_style_type,
// If these text or font properties change, we need to reconstruct the flow so that
// text shaping is re-run.
get_inheritedtext.letter_spacing, get_inheritedtext.text_rendering,
get_inheritedtext.text_transform, get_inheritedtext.word_spacing,
get_inheritedtext.overflow_wrap, get_inheritedtext.text_justify,
get_inheritedtext.white_space, get_inheritedtext.word_break, get_text.text_overflow,
get_font.font_family, get_font.font_style, get_font.font_variant_caps, get_font.font_weight,
get_font.font_size, get_font.font_stretch,
get_inheritedbox.direction, get_inheritedbox.writing_mode,
get_text.text_decoration_line, get_text.unicode_bidi,
get_inheritedtable.empty_cells, get_inheritedtable.caption_side,
get_column.column_width, get_column.column_count
]) || (new.get_box().display == display::T::inline &&
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW, RECONSTRUCT_FLOW], [
// For inline boxes only, border/padding styles are used in flow construction (to decide
// whether to create fragments for empty flows).
get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left
])) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW],
[get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_margin.margin_top, get_margin.margin_right,
get_margin.margin_bottom, get_margin.margin_left,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left,
get_position.width, get_position.height,
get_inheritedtext.line_height,
get_inheritedtext.text_align, get_inheritedtext.text_indent,
get_table.table_layout,
get_inheritedtable.border_collapse,
get_inheritedtable.border_spacing,
get_column.column_gap,
get_position.flex_direction,
get_position.flex_wrap,
get_position.justify_content,
get_position.align_items,
get_position.align_content,
get_position.order,
get_position.flex_basis,
get_position.flex_grow,
get_position.flex_shrink,
get_position.align_self
]) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, REFLOW_OUT_OF_FLOW], [
get_position.top, get_position.left,
get_position.right, get_position.bottom,
get_effects.opacity,
get_box.transform, get_box.transform_style, get_box.transform_origin,
get_box.perspective, get_box.perspective_origin
]) || add_if_not_equal!(old, new, damage,
[REPAINT], [
get_color.color, get_background.background_color,
get_background.background_image, get_background.background_position_x,
get_background.background_position_y, get_background.background_repeat,
get_background.background_attachment, get_background.background_clip,
get_background.background_origin, get_background.background_size,
get_border.border_top_color, get_border.border_right_color,
get_border.border_bottom_color, get_border.border_left_color,
get_border.border_top_style, get_border.border_right_style,
get_border.border_bottom_style, get_border.border_left_style,
get_border.border_top_left_radius, get_border.border_top_right_radius,
get_border.border_bottom_left_radius, get_border.border_bottom_right_radius,
get_position.z_index, get_box._servo_overflow_clip_box,
get_inheritedtext._servo_text_decorations_in_effect,
get_pointing.cursor, get_pointing.pointer_events,
get_effects.box_shadow, get_effects.clip, get_inheritedtext.text_shadow, get_effects.filter,
get_effects.mix_blend_mode, get_inheritedbox.image_rendering,
// Note: May require REFLOW et al. if `visibility: collapse` is implemented.
get_inheritedbox.visibility
]);
// Paint worklets may depend on custom properties,
// so if they have changed we should repaint.
if old.get_custom_properties() != new.get_custom_properties() {
damage.insert(REPAINT);
}
// If the layer requirements of this flow have changed due to the value
// of the transform, then reflow is required to rebuild the layers.
if old.transform_requires_layer() != new.transform_requires_layer() {
damage.insert(ServoRestyleDamage::rebuild_and_reflow());
}
damage
}
|
{
self
}
|
conditional_block
|
restyle_damage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The restyle damage is a hint that tells layout which kind of operations may
//! be needed in presence of incremental style changes.
#![deny(missing_docs)]
use computed_values::display;
use heapsize::HeapSizeOf;
use matching::{StyleChange, StyleDifference};
use properties::ComputedValues;
use std::fmt;
bitflags! {
#[doc = "Individual layout actions that may be necessary after restyling."]
pub flags ServoRestyleDamage: u8 {
#[doc = "Repaint the node itself."]
#[doc = "Currently unused; need to decide how this propagates."]
const REPAINT = 0x01,
#[doc = "The stacking-context-relative position of this node or its descendants has \
changed."]
#[doc = "Propagates both up and down the flow tree."]
const REPOSITION = 0x02,
#[doc = "Recompute the overflow regions (bounding box of object and all descendants)."]
#[doc = "Propagates down the flow tree because the computation is bottom-up."]
const STORE_OVERFLOW = 0x04,
#[doc = "Recompute intrinsic inline_sizes (minimum and preferred)."]
#[doc = "Propagates down the flow tree because the computation is"]
#[doc = "bottom-up."]
const BUBBLE_ISIZES = 0x08,
#[doc = "Recompute actual inline-sizes and block-sizes, only taking out-of-flow children \
into account. \
Propagates up the flow tree because the computation is top-down."]
const REFLOW_OUT_OF_FLOW = 0x10,
#[doc = "Recompute actual inline_sizes and block_sizes."]
#[doc = "Propagates up the flow tree because the computation is"]
#[doc = "top-down."]
const REFLOW = 0x20,
#[doc = "Re-resolve generated content. \
Propagates up the flow tree because the computation is inorder."]
const RESOLVE_GENERATED_CONTENT = 0x40,
#[doc = "The entire flow needs to be reconstructed."]
const RECONSTRUCT_FLOW = 0x80
}
}
impl HeapSizeOf for ServoRestyleDamage {
fn heap_size_of_children(&self) -> usize
|
}
impl ServoRestyleDamage {
/// Compute the `StyleDifference` (including the appropriate restyle damage)
/// for a given style change between `old` and `new`.
pub fn compute_style_difference(_source: &ComputedValues,
old: &ComputedValues,
new: &ComputedValues)
-> StyleDifference {
let damage = compute_damage(old, new);
let change = if damage.is_empty() { StyleChange::Unchanged } else { StyleChange::Changed };
StyleDifference::new(damage, change)
}
/// Computes the `StyleDifference` between the two `ComputedValues` objects
/// for the case where the old and new style are both `display: none`.
///
/// For Servo we never need to generate any damage for such elements.
pub fn compute_undisplayed_style_difference(
_old_style: &ComputedValues,
_new_style: &ComputedValues,
) -> StyleDifference {
StyleDifference::new(Self::empty(), StyleChange::Unchanged)
}
/// Returns a bitmask that represents a flow that needs to be rebuilt and
/// reflowed.
///
/// FIXME(bholley): Do we ever actually need this? Shouldn't
/// RECONSTRUCT_FLOW imply everything else?
pub fn rebuild_and_reflow() -> ServoRestyleDamage {
REPAINT | REPOSITION | STORE_OVERFLOW | BUBBLE_ISIZES | REFLOW_OUT_OF_FLOW | REFLOW |
RECONSTRUCT_FLOW
}
/// Returns a bitmask indicating that the frame needs to be reconstructed.
pub fn reconstruct() -> ServoRestyleDamage {
RECONSTRUCT_FLOW
}
/// Supposing a flow has the given `position` property and this damage,
/// returns the damage that we should add to the *parent* of this flow.
pub fn damage_for_parent(self, child_is_absolutely_positioned: bool) -> ServoRestyleDamage {
if child_is_absolutely_positioned {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
} else {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
}
}
/// Supposing the *parent* of a flow with the given `position` property has
/// this damage, returns the damage that we should add to this flow.
pub fn damage_for_child(self,
parent_is_absolutely_positioned: bool,
child_is_absolutely_positioned: bool)
-> ServoRestyleDamage {
match (parent_is_absolutely_positioned, child_is_absolutely_positioned) {
(false, true) => {
// Absolute children are out-of-flow and therefore insulated from changes.
//
// FIXME(pcwalton): Au contraire, if the containing block dimensions change!
self & (REPAINT | REPOSITION)
}
(true, false) => {
// Changing the position of an absolutely-positioned block requires us to reflow
// its kids.
if self.contains(REFLOW_OUT_OF_FLOW) {
self | REFLOW
} else {
self
}
}
_ => {
// TODO(pcwalton): Take floatedness into account.
self & (REPAINT | REPOSITION | REFLOW)
}
}
}
/// Servo doesn't implement this optimization.
pub fn handled_for_descendants(self) -> Self {
Self::empty()
}
}
impl Default for ServoRestyleDamage {
fn default() -> Self {
Self::empty()
}
}
impl fmt::Display for ServoRestyleDamage {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let mut first_elem = true;
let to_iter =
[ (REPAINT, "Repaint")
, (REPOSITION, "Reposition")
, (STORE_OVERFLOW, "StoreOverflow")
, (BUBBLE_ISIZES, "BubbleISizes")
, (REFLOW_OUT_OF_FLOW, "ReflowOutOfFlow")
, (REFLOW, "Reflow")
, (RESOLVE_GENERATED_CONTENT, "ResolveGeneratedContent")
, (RECONSTRUCT_FLOW, "ReconstructFlow")
];
for &(damage, damage_str) in &to_iter {
if self.contains(damage) {
if !first_elem { write!(f, " | ")?; }
write!(f, "{}", damage_str)?;
first_elem = false;
}
}
if first_elem {
write!(f, "NoDamage")?;
}
Ok(())
}
}
// NB: We need the braces inside the RHS due to Rust #8012. This particular
// version of this macro might be safe anyway, but we want to avoid silent
// breakage on modifications.
macro_rules! add_if_not_equal(
($old:ident, $new:ident, $damage:ident,
[ $($effect:ident),* ], [ $($style_struct_getter:ident.$name:ident),* ]) => ({
if $( ($old.$style_struct_getter().$name != $new.$style_struct_getter().$name) )||* {
$damage.insert($($effect)|*);
true
} else {
false
}
})
);
fn compute_damage(old: &ComputedValues, new: &ComputedValues) -> ServoRestyleDamage {
let mut damage = ServoRestyleDamage::empty();
// This should check every CSS property, as enumerated in the fields of
// http://doc.servo.org/style/properties/struct.ComputedValues.html
// FIXME: Test somehow that every property is included.
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES, REFLOW_OUT_OF_FLOW,
REFLOW, RECONSTRUCT_FLOW], [
get_box.clear, get_box.float, get_box.display, get_box.position, get_counters.content,
get_counters.counter_reset, get_counters.counter_increment,
get_inheritedbox._servo_under_display_none,
get_list.quotes, get_list.list_style_type,
// If these text or font properties change, we need to reconstruct the flow so that
// text shaping is re-run.
get_inheritedtext.letter_spacing, get_inheritedtext.text_rendering,
get_inheritedtext.text_transform, get_inheritedtext.word_spacing,
get_inheritedtext.overflow_wrap, get_inheritedtext.text_justify,
get_inheritedtext.white_space, get_inheritedtext.word_break, get_text.text_overflow,
get_font.font_family, get_font.font_style, get_font.font_variant_caps, get_font.font_weight,
get_font.font_size, get_font.font_stretch,
get_inheritedbox.direction, get_inheritedbox.writing_mode,
get_text.text_decoration_line, get_text.unicode_bidi,
get_inheritedtable.empty_cells, get_inheritedtable.caption_side,
get_column.column_width, get_column.column_count
]) || (new.get_box().display == display::T::inline &&
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW, RECONSTRUCT_FLOW], [
// For inline boxes only, border/padding styles are used in flow construction (to decide
// whether to create fragments for empty flows).
get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left
])) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW],
[get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_margin.margin_top, get_margin.margin_right,
get_margin.margin_bottom, get_margin.margin_left,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left,
get_position.width, get_position.height,
get_inheritedtext.line_height,
get_inheritedtext.text_align, get_inheritedtext.text_indent,
get_table.table_layout,
get_inheritedtable.border_collapse,
get_inheritedtable.border_spacing,
get_column.column_gap,
get_position.flex_direction,
get_position.flex_wrap,
get_position.justify_content,
get_position.align_items,
get_position.align_content,
get_position.order,
get_position.flex_basis,
get_position.flex_grow,
get_position.flex_shrink,
get_position.align_self
]) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, REFLOW_OUT_OF_FLOW], [
get_position.top, get_position.left,
get_position.right, get_position.bottom,
get_effects.opacity,
get_box.transform, get_box.transform_style, get_box.transform_origin,
get_box.perspective, get_box.perspective_origin
]) || add_if_not_equal!(old, new, damage,
[REPAINT], [
get_color.color, get_background.background_color,
get_background.background_image, get_background.background_position_x,
get_background.background_position_y, get_background.background_repeat,
get_background.background_attachment, get_background.background_clip,
get_background.background_origin, get_background.background_size,
get_border.border_top_color, get_border.border_right_color,
get_border.border_bottom_color, get_border.border_left_color,
get_border.border_top_style, get_border.border_right_style,
get_border.border_bottom_style, get_border.border_left_style,
get_border.border_top_left_radius, get_border.border_top_right_radius,
get_border.border_bottom_left_radius, get_border.border_bottom_right_radius,
get_position.z_index, get_box._servo_overflow_clip_box,
get_inheritedtext._servo_text_decorations_in_effect,
get_pointing.cursor, get_pointing.pointer_events,
get_effects.box_shadow, get_effects.clip, get_inheritedtext.text_shadow, get_effects.filter,
get_effects.mix_blend_mode, get_inheritedbox.image_rendering,
// Note: May require REFLOW et al. if `visibility: collapse` is implemented.
get_inheritedbox.visibility
]);
// Paint worklets may depend on custom properties,
// so if they have changed we should repaint.
if old.get_custom_properties() != new.get_custom_properties() {
damage.insert(REPAINT);
}
// If the layer requirements of this flow have changed due to the value
// of the transform, then reflow is required to rebuild the layers.
if old.transform_requires_layer() != new.transform_requires_layer() {
damage.insert(ServoRestyleDamage::rebuild_and_reflow());
}
damage
}
|
{ 0 }
|
identifier_body
|
restyle_damage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The restyle damage is a hint that tells layout which kind of operations may
//! be needed in presence of incremental style changes.
#![deny(missing_docs)]
use computed_values::display;
use heapsize::HeapSizeOf;
use matching::{StyleChange, StyleDifference};
use properties::ComputedValues;
use std::fmt;
bitflags! {
#[doc = "Individual layout actions that may be necessary after restyling."]
pub flags ServoRestyleDamage: u8 {
#[doc = "Repaint the node itself."]
#[doc = "Currently unused; need to decide how this propagates."]
const REPAINT = 0x01,
#[doc = "The stacking-context-relative position of this node or its descendants has \
changed."]
#[doc = "Propagates both up and down the flow tree."]
const REPOSITION = 0x02,
#[doc = "Recompute the overflow regions (bounding box of object and all descendants)."]
#[doc = "Propagates down the flow tree because the computation is bottom-up."]
const STORE_OVERFLOW = 0x04,
#[doc = "Recompute intrinsic inline_sizes (minimum and preferred)."]
#[doc = "Propagates down the flow tree because the computation is"]
#[doc = "bottom-up."]
const BUBBLE_ISIZES = 0x08,
#[doc = "Recompute actual inline-sizes and block-sizes, only taking out-of-flow children \
into account. \
Propagates up the flow tree because the computation is top-down."]
const REFLOW_OUT_OF_FLOW = 0x10,
#[doc = "Recompute actual inline_sizes and block_sizes."]
#[doc = "Propagates up the flow tree because the computation is"]
#[doc = "top-down."]
const REFLOW = 0x20,
#[doc = "Re-resolve generated content. \
Propagates up the flow tree because the computation is inorder."]
const RESOLVE_GENERATED_CONTENT = 0x40,
#[doc = "The entire flow needs to be reconstructed."]
const RECONSTRUCT_FLOW = 0x80
}
}
impl HeapSizeOf for ServoRestyleDamage {
fn heap_size_of_children(&self) -> usize { 0 }
}
impl ServoRestyleDamage {
/// Compute the `StyleDifference` (including the appropriate restyle damage)
/// for a given style change between `old` and `new`.
pub fn compute_style_difference(_source: &ComputedValues,
old: &ComputedValues,
new: &ComputedValues)
-> StyleDifference {
let damage = compute_damage(old, new);
let change = if damage.is_empty() { StyleChange::Unchanged } else { StyleChange::Changed };
StyleDifference::new(damage, change)
}
/// Computes the `StyleDifference` between the two `ComputedValues` objects
/// for the case where the old and new style are both `display: none`.
///
/// For Servo we never need to generate any damage for such elements.
pub fn compute_undisplayed_style_difference(
_old_style: &ComputedValues,
_new_style: &ComputedValues,
) -> StyleDifference {
StyleDifference::new(Self::empty(), StyleChange::Unchanged)
}
/// Returns a bitmask that represents a flow that needs to be rebuilt and
/// reflowed.
///
/// FIXME(bholley): Do we ever actually need this? Shouldn't
/// RECONSTRUCT_FLOW imply everything else?
pub fn rebuild_and_reflow() -> ServoRestyleDamage {
REPAINT | REPOSITION | STORE_OVERFLOW | BUBBLE_ISIZES | REFLOW_OUT_OF_FLOW | REFLOW |
RECONSTRUCT_FLOW
}
/// Returns a bitmask indicating that the frame needs to be reconstructed.
pub fn reconstruct() -> ServoRestyleDamage {
RECONSTRUCT_FLOW
}
/// Supposing a flow has the given `position` property and this damage,
/// returns the damage that we should add to the *parent* of this flow.
pub fn damage_for_parent(self, child_is_absolutely_positioned: bool) -> ServoRestyleDamage {
if child_is_absolutely_positioned {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
} else {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
}
}
/// Supposing the *parent* of a flow with the given `position` property has
/// this damage, returns the damage that we should add to this flow.
pub fn damage_for_child(self,
parent_is_absolutely_positioned: bool,
child_is_absolutely_positioned: bool)
-> ServoRestyleDamage {
match (parent_is_absolutely_positioned, child_is_absolutely_positioned) {
(false, true) => {
// Absolute children are out-of-flow and therefore insulated from changes.
//
// FIXME(pcwalton): Au contraire, if the containing block dimensions change!
self & (REPAINT | REPOSITION)
}
(true, false) => {
// Changing the position of an absolutely-positioned block requires us to reflow
// its kids.
if self.contains(REFLOW_OUT_OF_FLOW) {
self | REFLOW
} else {
self
}
}
_ => {
// TODO(pcwalton): Take floatedness into account.
self & (REPAINT | REPOSITION | REFLOW)
}
}
}
/// Servo doesn't implement this optimization.
pub fn handled_for_descendants(self) -> Self {
Self::empty()
}
}
impl Default for ServoRestyleDamage {
fn
|
() -> Self {
Self::empty()
}
}
impl fmt::Display for ServoRestyleDamage {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let mut first_elem = true;
let to_iter =
[ (REPAINT, "Repaint")
, (REPOSITION, "Reposition")
, (STORE_OVERFLOW, "StoreOverflow")
, (BUBBLE_ISIZES, "BubbleISizes")
, (REFLOW_OUT_OF_FLOW, "ReflowOutOfFlow")
, (REFLOW, "Reflow")
, (RESOLVE_GENERATED_CONTENT, "ResolveGeneratedContent")
, (RECONSTRUCT_FLOW, "ReconstructFlow")
];
for &(damage, damage_str) in &to_iter {
if self.contains(damage) {
if !first_elem { write!(f, " | ")?; }
write!(f, "{}", damage_str)?;
first_elem = false;
}
}
if first_elem {
write!(f, "NoDamage")?;
}
Ok(())
}
}
// NB: We need the braces inside the RHS due to Rust #8012. This particular
// version of this macro might be safe anyway, but we want to avoid silent
// breakage on modifications.
macro_rules! add_if_not_equal(
($old:ident, $new:ident, $damage:ident,
[ $($effect:ident),* ], [ $($style_struct_getter:ident.$name:ident),* ]) => ({
if $( ($old.$style_struct_getter().$name != $new.$style_struct_getter().$name) )||* {
$damage.insert($($effect)|*);
true
} else {
false
}
})
);
fn compute_damage(old: &ComputedValues, new: &ComputedValues) -> ServoRestyleDamage {
let mut damage = ServoRestyleDamage::empty();
// This should check every CSS property, as enumerated in the fields of
// http://doc.servo.org/style/properties/struct.ComputedValues.html
// FIXME: Test somehow that every property is included.
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES, REFLOW_OUT_OF_FLOW,
REFLOW, RECONSTRUCT_FLOW], [
get_box.clear, get_box.float, get_box.display, get_box.position, get_counters.content,
get_counters.counter_reset, get_counters.counter_increment,
get_inheritedbox._servo_under_display_none,
get_list.quotes, get_list.list_style_type,
// If these text or font properties change, we need to reconstruct the flow so that
// text shaping is re-run.
get_inheritedtext.letter_spacing, get_inheritedtext.text_rendering,
get_inheritedtext.text_transform, get_inheritedtext.word_spacing,
get_inheritedtext.overflow_wrap, get_inheritedtext.text_justify,
get_inheritedtext.white_space, get_inheritedtext.word_break, get_text.text_overflow,
get_font.font_family, get_font.font_style, get_font.font_variant_caps, get_font.font_weight,
get_font.font_size, get_font.font_stretch,
get_inheritedbox.direction, get_inheritedbox.writing_mode,
get_text.text_decoration_line, get_text.unicode_bidi,
get_inheritedtable.empty_cells, get_inheritedtable.caption_side,
get_column.column_width, get_column.column_count
]) || (new.get_box().display == display::T::inline &&
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW, RECONSTRUCT_FLOW], [
// For inline boxes only, border/padding styles are used in flow construction (to decide
// whether to create fragments for empty flows).
get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left
])) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW],
[get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_margin.margin_top, get_margin.margin_right,
get_margin.margin_bottom, get_margin.margin_left,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left,
get_position.width, get_position.height,
get_inheritedtext.line_height,
get_inheritedtext.text_align, get_inheritedtext.text_indent,
get_table.table_layout,
get_inheritedtable.border_collapse,
get_inheritedtable.border_spacing,
get_column.column_gap,
get_position.flex_direction,
get_position.flex_wrap,
get_position.justify_content,
get_position.align_items,
get_position.align_content,
get_position.order,
get_position.flex_basis,
get_position.flex_grow,
get_position.flex_shrink,
get_position.align_self
]) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, REFLOW_OUT_OF_FLOW], [
get_position.top, get_position.left,
get_position.right, get_position.bottom,
get_effects.opacity,
get_box.transform, get_box.transform_style, get_box.transform_origin,
get_box.perspective, get_box.perspective_origin
]) || add_if_not_equal!(old, new, damage,
[REPAINT], [
get_color.color, get_background.background_color,
get_background.background_image, get_background.background_position_x,
get_background.background_position_y, get_background.background_repeat,
get_background.background_attachment, get_background.background_clip,
get_background.background_origin, get_background.background_size,
get_border.border_top_color, get_border.border_right_color,
get_border.border_bottom_color, get_border.border_left_color,
get_border.border_top_style, get_border.border_right_style,
get_border.border_bottom_style, get_border.border_left_style,
get_border.border_top_left_radius, get_border.border_top_right_radius,
get_border.border_bottom_left_radius, get_border.border_bottom_right_radius,
get_position.z_index, get_box._servo_overflow_clip_box,
get_inheritedtext._servo_text_decorations_in_effect,
get_pointing.cursor, get_pointing.pointer_events,
get_effects.box_shadow, get_effects.clip, get_inheritedtext.text_shadow, get_effects.filter,
get_effects.mix_blend_mode, get_inheritedbox.image_rendering,
// Note: May require REFLOW et al. if `visibility: collapse` is implemented.
get_inheritedbox.visibility
]);
// Paint worklets may depend on custom properties,
// so if they have changed we should repaint.
if old.get_custom_properties() != new.get_custom_properties() {
damage.insert(REPAINT);
}
// If the layer requirements of this flow have changed due to the value
// of the transform, then reflow is required to rebuild the layers.
if old.transform_requires_layer() != new.transform_requires_layer() {
damage.insert(ServoRestyleDamage::rebuild_and_reflow());
}
damage
}
|
default
|
identifier_name
|
restyle_damage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The restyle damage is a hint that tells layout which kind of operations may
//! be needed in presence of incremental style changes.
#![deny(missing_docs)]
use computed_values::display;
use heapsize::HeapSizeOf;
use matching::{StyleChange, StyleDifference};
use properties::ComputedValues;
use std::fmt;
bitflags! {
#[doc = "Individual layout actions that may be necessary after restyling."]
pub flags ServoRestyleDamage: u8 {
#[doc = "Repaint the node itself."]
#[doc = "Currently unused; need to decide how this propagates."]
const REPAINT = 0x01,
#[doc = "The stacking-context-relative position of this node or its descendants has \
changed."]
#[doc = "Propagates both up and down the flow tree."]
const REPOSITION = 0x02,
#[doc = "Recompute the overflow regions (bounding box of object and all descendants)."]
#[doc = "Propagates down the flow tree because the computation is bottom-up."]
const STORE_OVERFLOW = 0x04,
#[doc = "Recompute intrinsic inline_sizes (minimum and preferred)."]
#[doc = "Propagates down the flow tree because the computation is"]
#[doc = "bottom-up."]
const BUBBLE_ISIZES = 0x08,
#[doc = "Recompute actual inline-sizes and block-sizes, only taking out-of-flow children \
into account. \
Propagates up the flow tree because the computation is top-down."]
const REFLOW_OUT_OF_FLOW = 0x10,
#[doc = "Recompute actual inline_sizes and block_sizes."]
#[doc = "Propagates up the flow tree because the computation is"]
#[doc = "top-down."]
const REFLOW = 0x20,
#[doc = "Re-resolve generated content. \
Propagates up the flow tree because the computation is inorder."]
const RESOLVE_GENERATED_CONTENT = 0x40,
#[doc = "The entire flow needs to be reconstructed."]
const RECONSTRUCT_FLOW = 0x80
}
}
impl HeapSizeOf for ServoRestyleDamage {
fn heap_size_of_children(&self) -> usize { 0 }
}
impl ServoRestyleDamage {
/// Compute the `StyleDifference` (including the appropriate restyle damage)
/// for a given style change between `old` and `new`.
pub fn compute_style_difference(_source: &ComputedValues,
old: &ComputedValues,
new: &ComputedValues)
-> StyleDifference {
let damage = compute_damage(old, new);
let change = if damage.is_empty() { StyleChange::Unchanged } else { StyleChange::Changed };
StyleDifference::new(damage, change)
}
/// Computes the `StyleDifference` between the two `ComputedValues` objects
/// for the case where the old and new style are both `display: none`.
///
/// For Servo we never need to generate any damage for such elements.
pub fn compute_undisplayed_style_difference(
_old_style: &ComputedValues,
_new_style: &ComputedValues,
) -> StyleDifference {
StyleDifference::new(Self::empty(), StyleChange::Unchanged)
}
/// Returns a bitmask that represents a flow that needs to be rebuilt and
/// reflowed.
///
/// FIXME(bholley): Do we ever actually need this? Shouldn't
/// RECONSTRUCT_FLOW imply everything else?
pub fn rebuild_and_reflow() -> ServoRestyleDamage {
REPAINT | REPOSITION | STORE_OVERFLOW | BUBBLE_ISIZES | REFLOW_OUT_OF_FLOW | REFLOW |
RECONSTRUCT_FLOW
}
/// Returns a bitmask indicating that the frame needs to be reconstructed.
pub fn reconstruct() -> ServoRestyleDamage {
RECONSTRUCT_FLOW
}
/// Supposing a flow has the given `position` property and this damage,
/// returns the damage that we should add to the *parent* of this flow.
pub fn damage_for_parent(self, child_is_absolutely_positioned: bool) -> ServoRestyleDamage {
if child_is_absolutely_positioned {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
} else {
self & (REPAINT | REPOSITION | STORE_OVERFLOW | REFLOW | REFLOW_OUT_OF_FLOW |
RESOLVE_GENERATED_CONTENT)
}
}
/// Supposing the *parent* of a flow with the given `position` property has
/// this damage, returns the damage that we should add to this flow.
pub fn damage_for_child(self,
parent_is_absolutely_positioned: bool,
child_is_absolutely_positioned: bool)
-> ServoRestyleDamage {
match (parent_is_absolutely_positioned, child_is_absolutely_positioned) {
(false, true) => {
// Absolute children are out-of-flow and therefore insulated from changes.
//
// FIXME(pcwalton): Au contraire, if the containing block dimensions change!
self & (REPAINT | REPOSITION)
}
(true, false) => {
// Changing the position of an absolutely-positioned block requires us to reflow
// its kids.
if self.contains(REFLOW_OUT_OF_FLOW) {
self | REFLOW
} else {
self
}
}
_ => {
// TODO(pcwalton): Take floatedness into account.
self & (REPAINT | REPOSITION | REFLOW)
}
}
}
/// Servo doesn't implement this optimization.
pub fn handled_for_descendants(self) -> Self {
Self::empty()
}
}
impl Default for ServoRestyleDamage {
fn default() -> Self {
Self::empty()
}
}
impl fmt::Display for ServoRestyleDamage {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let mut first_elem = true;
let to_iter =
[ (REPAINT, "Repaint")
, (REPOSITION, "Reposition")
, (STORE_OVERFLOW, "StoreOverflow")
, (BUBBLE_ISIZES, "BubbleISizes")
, (REFLOW_OUT_OF_FLOW, "ReflowOutOfFlow")
, (REFLOW, "Reflow")
, (RESOLVE_GENERATED_CONTENT, "ResolveGeneratedContent")
, (RECONSTRUCT_FLOW, "ReconstructFlow")
];
for &(damage, damage_str) in &to_iter {
if self.contains(damage) {
if !first_elem { write!(f, " | ")?; }
write!(f, "{}", damage_str)?;
first_elem = false;
}
}
if first_elem {
write!(f, "NoDamage")?;
|
// NB: We need the braces inside the RHS due to Rust #8012. This particular
// version of this macro might be safe anyway, but we want to avoid silent
// breakage on modifications.
macro_rules! add_if_not_equal(
($old:ident, $new:ident, $damage:ident,
[ $($effect:ident),* ], [ $($style_struct_getter:ident.$name:ident),* ]) => ({
if $( ($old.$style_struct_getter().$name != $new.$style_struct_getter().$name) )||* {
$damage.insert($($effect)|*);
true
} else {
false
}
})
);
fn compute_damage(old: &ComputedValues, new: &ComputedValues) -> ServoRestyleDamage {
let mut damage = ServoRestyleDamage::empty();
// This should check every CSS property, as enumerated in the fields of
// http://doc.servo.org/style/properties/struct.ComputedValues.html
// FIXME: Test somehow that every property is included.
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES, REFLOW_OUT_OF_FLOW,
REFLOW, RECONSTRUCT_FLOW], [
get_box.clear, get_box.float, get_box.display, get_box.position, get_counters.content,
get_counters.counter_reset, get_counters.counter_increment,
get_inheritedbox._servo_under_display_none,
get_list.quotes, get_list.list_style_type,
// If these text or font properties change, we need to reconstruct the flow so that
// text shaping is re-run.
get_inheritedtext.letter_spacing, get_inheritedtext.text_rendering,
get_inheritedtext.text_transform, get_inheritedtext.word_spacing,
get_inheritedtext.overflow_wrap, get_inheritedtext.text_justify,
get_inheritedtext.white_space, get_inheritedtext.word_break, get_text.text_overflow,
get_font.font_family, get_font.font_style, get_font.font_variant_caps, get_font.font_weight,
get_font.font_size, get_font.font_stretch,
get_inheritedbox.direction, get_inheritedbox.writing_mode,
get_text.text_decoration_line, get_text.unicode_bidi,
get_inheritedtable.empty_cells, get_inheritedtable.caption_side,
get_column.column_width, get_column.column_count
]) || (new.get_box().display == display::T::inline &&
add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW, RECONSTRUCT_FLOW], [
// For inline boxes only, border/padding styles are used in flow construction (to decide
// whether to create fragments for empty flows).
get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left
])) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, BUBBLE_ISIZES,
REFLOW_OUT_OF_FLOW, REFLOW],
[get_border.border_top_width, get_border.border_right_width,
get_border.border_bottom_width, get_border.border_left_width,
get_margin.margin_top, get_margin.margin_right,
get_margin.margin_bottom, get_margin.margin_left,
get_padding.padding_top, get_padding.padding_right,
get_padding.padding_bottom, get_padding.padding_left,
get_position.width, get_position.height,
get_inheritedtext.line_height,
get_inheritedtext.text_align, get_inheritedtext.text_indent,
get_table.table_layout,
get_inheritedtable.border_collapse,
get_inheritedtable.border_spacing,
get_column.column_gap,
get_position.flex_direction,
get_position.flex_wrap,
get_position.justify_content,
get_position.align_items,
get_position.align_content,
get_position.order,
get_position.flex_basis,
get_position.flex_grow,
get_position.flex_shrink,
get_position.align_self
]) || add_if_not_equal!(old, new, damage,
[REPAINT, REPOSITION, STORE_OVERFLOW, REFLOW_OUT_OF_FLOW], [
get_position.top, get_position.left,
get_position.right, get_position.bottom,
get_effects.opacity,
get_box.transform, get_box.transform_style, get_box.transform_origin,
get_box.perspective, get_box.perspective_origin
]) || add_if_not_equal!(old, new, damage,
[REPAINT], [
get_color.color, get_background.background_color,
get_background.background_image, get_background.background_position_x,
get_background.background_position_y, get_background.background_repeat,
get_background.background_attachment, get_background.background_clip,
get_background.background_origin, get_background.background_size,
get_border.border_top_color, get_border.border_right_color,
get_border.border_bottom_color, get_border.border_left_color,
get_border.border_top_style, get_border.border_right_style,
get_border.border_bottom_style, get_border.border_left_style,
get_border.border_top_left_radius, get_border.border_top_right_radius,
get_border.border_bottom_left_radius, get_border.border_bottom_right_radius,
get_position.z_index, get_box._servo_overflow_clip_box,
get_inheritedtext._servo_text_decorations_in_effect,
get_pointing.cursor, get_pointing.pointer_events,
get_effects.box_shadow, get_effects.clip, get_inheritedtext.text_shadow, get_effects.filter,
get_effects.mix_blend_mode, get_inheritedbox.image_rendering,
// Note: May require REFLOW et al. if `visibility: collapse` is implemented.
get_inheritedbox.visibility
]);
// Paint worklets may depend on custom properties,
// so if they have changed we should repaint.
if old.get_custom_properties() != new.get_custom_properties() {
damage.insert(REPAINT);
}
// If the layer requirements of this flow have changed due to the value
// of the transform, then reflow is required to rebuild the layers.
if old.transform_requires_layer() != new.transform_requires_layer() {
damage.insert(ServoRestyleDamage::rebuild_and_reflow());
}
damage
}
|
}
Ok(())
}
}
|
random_line_split
|
parser.rs
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
// except according to those terms.
//! Higher-level Rust constructs for http_parser
use std::vec::raw::from_buf_raw;
use std::libc::{c_int, c_void, c_char, size_t};
use std::ptr::{null, to_unsafe_ptr};
use std::str;
use http_parser;
use http_parser::{http_parser_settings, HTTP_REQUEST};
use http_parser::{http_parser_init, http_parser_execute};
use http_parser::{enum_http_errno, http_errno_name, http_errno_description};
// pub type HttpCallback = || -> bool;
// pub type HttpDataCallback = |data: ~[u8]| -> bool;
pub struct ParserCallbacks<'self> {
on_message_begin: &'self fn () -> bool,
on_url: &'self fn (data: ~[u8]) -> bool,
on_status_complete: &'self fn () -> bool,
on_header_field: &'self fn (data: ~[u8]) -> bool,
on_header_value: &'self fn (data: ~[u8]) -> bool,
on_headers_complete: &'self fn () -> bool,
on_body: &'self fn (data: ~[u8]) -> bool,
on_message_complete: &'self fn () -> bool
}
pub struct Parser {
http_parser: http_parser::http_parser,
settings: http_parser_settings
}
pub fn Parser() -> Parser {
#[fixed_stack_segment];
let http_parser = http_parser::struct_http_parser {
_type_flags: 0,
state: 0,
header_state: 0,
index: 0,
nread: 0,
content_length: 0,
http_major: 0,
http_minor: 0,
status_code: 0,
method: 0,
http_errno_upgrade: 0,
data: null()
};
unsafe {
http_parser_init(&http_parser, HTTP_REQUEST);
}
let settings = http_parser::struct_http_parser_settings {
on_message_begin: on_message_begin as *u8,
on_url: on_url as *u8,
on_status_complete: on_status_complete as *u8,
on_header_field: on_header_field as *u8,
on_header_value: on_header_value as *u8,
on_headers_complete: on_headers_complete as *u8,
on_body: on_body as *u8,
on_message_complete: on_message_complete as *u8
};
Parser {
http_parser: http_parser,
settings: settings
}
}
impl Parser {
pub fn execute(&mut self, data: &[u8], callbacks: &ParserCallbacks) -> uint {
#[fixed_stack_segment];
unsafe {
self.http_parser.data = to_unsafe_ptr(callbacks) as *c_void;
do data.as_imm_buf |buf, _| {
http_parser_execute(&self.http_parser,
&self.settings,
buf as *c_char,
data.len() as size_t) as uint
}
}
}
pub fn status_code(&self) -> uint {
self.http_parser.status_code as uint
}
pub fn method(&self) -> uint {
self.http_parser.method as uint
}
pub fn error(&self) -> (~str, ~str) {
#[fixed_stack_segment];
let err = (self.http_parser.http_errno_upgrade & 0x7f) as enum_http_errno;
unsafe {
(str::raw::from_c_str(http_errno_name(err)),
str::raw::from_c_str(http_errno_description(err)))
}
}
}
fn callbacks(http_parser: *http_parser::http_parser) -> *ParserCallbacks {
unsafe {
assert!((*http_parser).data.is_not_null());
return (*http_parser).data as *ParserCallbacks;
}
}
extern fn on_message_begin(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_message_begin)()) as c_int
}
}
extern fn on_url(http_parser: *http_parser::http_parser, at: *u8, length: size_t) -> c_int {
unsafe {
(!(((*callbacks(http_parser)).on_url)(from_buf_raw(at, length as uint)))) as c_int
}
}
extern fn on_status_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_status_complete)()) as c_int
}
}
extern fn on_header_field(http_parser: *http_parser::http_parser, at: *u8, length: size_t) ->
c_int {
unsafe {
(!((*callbacks(http_parser)).on_header_field)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_header_value(http_parser: *http_parser::http_parser, at: *u8, length: size_t) ->
c_int {
unsafe {
(!((*callbacks(http_parser)).on_header_value)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_headers_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_headers_complete)()) as c_int
}
}
extern fn on_body(http_parser: *http_parser::http_parser, at: *u8, length: size_t) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_body)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_message_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_message_complete)()) as c_int
}
}
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
random_line_split
|
parser.rs
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Higher-level Rust constructs for http_parser
use std::vec::raw::from_buf_raw;
use std::libc::{c_int, c_void, c_char, size_t};
use std::ptr::{null, to_unsafe_ptr};
use std::str;
use http_parser;
use http_parser::{http_parser_settings, HTTP_REQUEST};
use http_parser::{http_parser_init, http_parser_execute};
use http_parser::{enum_http_errno, http_errno_name, http_errno_description};
// pub type HttpCallback = || -> bool;
// pub type HttpDataCallback = |data: ~[u8]| -> bool;
pub struct ParserCallbacks<'self> {
on_message_begin: &'self fn () -> bool,
on_url: &'self fn (data: ~[u8]) -> bool,
on_status_complete: &'self fn () -> bool,
on_header_field: &'self fn (data: ~[u8]) -> bool,
on_header_value: &'self fn (data: ~[u8]) -> bool,
on_headers_complete: &'self fn () -> bool,
on_body: &'self fn (data: ~[u8]) -> bool,
on_message_complete: &'self fn () -> bool
}
pub struct Parser {
http_parser: http_parser::http_parser,
settings: http_parser_settings
}
pub fn Parser() -> Parser
|
let settings = http_parser::struct_http_parser_settings {
on_message_begin: on_message_begin as *u8,
on_url: on_url as *u8,
on_status_complete: on_status_complete as *u8,
on_header_field: on_header_field as *u8,
on_header_value: on_header_value as *u8,
on_headers_complete: on_headers_complete as *u8,
on_body: on_body as *u8,
on_message_complete: on_message_complete as *u8
};
Parser {
http_parser: http_parser,
settings: settings
}
}
impl Parser {
pub fn execute(&mut self, data: &[u8], callbacks: &ParserCallbacks) -> uint {
#[fixed_stack_segment];
unsafe {
self.http_parser.data = to_unsafe_ptr(callbacks) as *c_void;
do data.as_imm_buf |buf, _| {
http_parser_execute(&self.http_parser,
&self.settings,
buf as *c_char,
data.len() as size_t) as uint
}
}
}
pub fn status_code(&self) -> uint {
self.http_parser.status_code as uint
}
pub fn method(&self) -> uint {
self.http_parser.method as uint
}
pub fn error(&self) -> (~str, ~str) {
#[fixed_stack_segment];
let err = (self.http_parser.http_errno_upgrade & 0x7f) as enum_http_errno;
unsafe {
(str::raw::from_c_str(http_errno_name(err)),
str::raw::from_c_str(http_errno_description(err)))
}
}
}
fn callbacks(http_parser: *http_parser::http_parser) -> *ParserCallbacks {
unsafe {
assert!((*http_parser).data.is_not_null());
return (*http_parser).data as *ParserCallbacks;
}
}
extern fn on_message_begin(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_message_begin)()) as c_int
}
}
extern fn on_url(http_parser: *http_parser::http_parser, at: *u8, length: size_t) -> c_int {
unsafe {
(!(((*callbacks(http_parser)).on_url)(from_buf_raw(at, length as uint)))) as c_int
}
}
extern fn on_status_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_status_complete)()) as c_int
}
}
extern fn on_header_field(http_parser: *http_parser::http_parser, at: *u8, length: size_t) ->
c_int {
unsafe {
(!((*callbacks(http_parser)).on_header_field)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_header_value(http_parser: *http_parser::http_parser, at: *u8, length: size_t) ->
c_int {
unsafe {
(!((*callbacks(http_parser)).on_header_value)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_headers_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_headers_complete)()) as c_int
}
}
extern fn on_body(http_parser: *http_parser::http_parser, at: *u8, length: size_t) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_body)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_message_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_message_complete)()) as c_int
}
}
|
{
#[fixed_stack_segment];
let http_parser = http_parser::struct_http_parser {
_type_flags: 0,
state: 0,
header_state: 0,
index: 0,
nread: 0,
content_length: 0,
http_major: 0,
http_minor: 0,
status_code: 0,
method: 0,
http_errno_upgrade: 0,
data: null()
};
unsafe {
http_parser_init(&http_parser, HTTP_REQUEST);
}
|
identifier_body
|
parser.rs
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Higher-level Rust constructs for http_parser
use std::vec::raw::from_buf_raw;
use std::libc::{c_int, c_void, c_char, size_t};
use std::ptr::{null, to_unsafe_ptr};
use std::str;
use http_parser;
use http_parser::{http_parser_settings, HTTP_REQUEST};
use http_parser::{http_parser_init, http_parser_execute};
use http_parser::{enum_http_errno, http_errno_name, http_errno_description};
// pub type HttpCallback = || -> bool;
// pub type HttpDataCallback = |data: ~[u8]| -> bool;
pub struct ParserCallbacks<'self> {
on_message_begin: &'self fn () -> bool,
on_url: &'self fn (data: ~[u8]) -> bool,
on_status_complete: &'self fn () -> bool,
on_header_field: &'self fn (data: ~[u8]) -> bool,
on_header_value: &'self fn (data: ~[u8]) -> bool,
on_headers_complete: &'self fn () -> bool,
on_body: &'self fn (data: ~[u8]) -> bool,
on_message_complete: &'self fn () -> bool
}
pub struct Parser {
http_parser: http_parser::http_parser,
settings: http_parser_settings
}
pub fn
|
() -> Parser {
#[fixed_stack_segment];
let http_parser = http_parser::struct_http_parser {
_type_flags: 0,
state: 0,
header_state: 0,
index: 0,
nread: 0,
content_length: 0,
http_major: 0,
http_minor: 0,
status_code: 0,
method: 0,
http_errno_upgrade: 0,
data: null()
};
unsafe {
http_parser_init(&http_parser, HTTP_REQUEST);
}
let settings = http_parser::struct_http_parser_settings {
on_message_begin: on_message_begin as *u8,
on_url: on_url as *u8,
on_status_complete: on_status_complete as *u8,
on_header_field: on_header_field as *u8,
on_header_value: on_header_value as *u8,
on_headers_complete: on_headers_complete as *u8,
on_body: on_body as *u8,
on_message_complete: on_message_complete as *u8
};
Parser {
http_parser: http_parser,
settings: settings
}
}
impl Parser {
pub fn execute(&mut self, data: &[u8], callbacks: &ParserCallbacks) -> uint {
#[fixed_stack_segment];
unsafe {
self.http_parser.data = to_unsafe_ptr(callbacks) as *c_void;
do data.as_imm_buf |buf, _| {
http_parser_execute(&self.http_parser,
&self.settings,
buf as *c_char,
data.len() as size_t) as uint
}
}
}
pub fn status_code(&self) -> uint {
self.http_parser.status_code as uint
}
pub fn method(&self) -> uint {
self.http_parser.method as uint
}
pub fn error(&self) -> (~str, ~str) {
#[fixed_stack_segment];
let err = (self.http_parser.http_errno_upgrade & 0x7f) as enum_http_errno;
unsafe {
(str::raw::from_c_str(http_errno_name(err)),
str::raw::from_c_str(http_errno_description(err)))
}
}
}
fn callbacks(http_parser: *http_parser::http_parser) -> *ParserCallbacks {
unsafe {
assert!((*http_parser).data.is_not_null());
return (*http_parser).data as *ParserCallbacks;
}
}
extern fn on_message_begin(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_message_begin)()) as c_int
}
}
extern fn on_url(http_parser: *http_parser::http_parser, at: *u8, length: size_t) -> c_int {
unsafe {
(!(((*callbacks(http_parser)).on_url)(from_buf_raw(at, length as uint)))) as c_int
}
}
extern fn on_status_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_status_complete)()) as c_int
}
}
extern fn on_header_field(http_parser: *http_parser::http_parser, at: *u8, length: size_t) ->
c_int {
unsafe {
(!((*callbacks(http_parser)).on_header_field)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_header_value(http_parser: *http_parser::http_parser, at: *u8, length: size_t) ->
c_int {
unsafe {
(!((*callbacks(http_parser)).on_header_value)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_headers_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_headers_complete)()) as c_int
}
}
extern fn on_body(http_parser: *http_parser::http_parser, at: *u8, length: size_t) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_body)(from_buf_raw(at, length as uint))) as c_int
}
}
extern fn on_message_complete(http_parser: *http_parser::http_parser) -> c_int {
unsafe {
(!((*callbacks(http_parser)).on_message_complete)()) as c_int
}
}
|
Parser
|
identifier_name
|
mod.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! RPC mocked tests. Most of these test that the RPC server is serializing and forwarding
//! method calls properly.
|
mod manage_network;
mod net;
mod parity;
mod parity_accounts;
mod parity_set;
mod personal;
mod rpc;
mod signer;
mod signing;
mod traces;
mod web3;
|
mod eth;
|
random_line_split
|
nodes.rs
|
use super::prelude::*;
use stencila_schema::Node;
/// Override of macro to implement `from_value` for all node types
macro_rules! patchable_node_variants {
($( $variant:path )*) => {
impl Patchable for Node {
patchable_variants_is_equal!($( $variant )*);
patchable_variants_hash!($( $variant )*);
patchable_variants_apply_add!($( $variant )*);
patchable_variants_apply_remove!($( $variant )*);
patchable_variants_apply_replace!($( $variant )*);
patchable_variants_apply_move!($( $variant )*);
patchable_variants_apply_transform!($( $variant )*);
fn diff(&self, other: &Self, differ: &mut Differ) {
#[allow(unreachable_patterns)]
match (self, other) {
// For the atomic primitives, do replacement at this level,
// so that the `Replace` operation has a `value` of type
// `Node::Number` not a `f64` etc.
(Node::Boolean(..), Node::Boolean(..)) |
(Node::Integer(..), Node::Integer(..)) |
(Node::Number(..), Node::Number(..)) => {
                            if !self.is_equal(other).is_ok() {
differ.replace(other)
}
},
// For other matching pairs of other variants do diffing
$(
($variant(me), $variant(other)) => me.diff(other, differ),
)*
// Usual fallback to replacement for unmatched variants
_ => differ.replace(other)
}
}
fn from_value(value: &Value) -> Result<Self>
where
                Self: Clone + Sized + 'static,
{
if let Some(value) = value.downcast_ref::<Self>() {
return Ok(value.clone());
} else if let Some(value) = value.downcast_ref::<serde_json::Value>() {
if let Some(string) = value.as_str() {
return Ok(Node::String(string.to_string()));
}
if let Some(number) = value.as_f64() {
return Ok(Node::Number(number));
}
if let Some(integer) = value.as_i64() {
return Ok(Node::Integer(integer));
}
if let Some(boolean) = value.as_bool() {
return Ok(Node::Boolean(boolean));
}
}
bail!(invalid_patch_value::<Self>())
}
}
|
};
}
patchable_node_variants!(
Node::Array
Node::Article
Node::AudioObject
Node::Boolean
Node::Cite
Node::CiteGroup
Node::Claim
Node::CodeBlock
Node::CodeChunk
Node::CodeExpression
Node::CodeFragment
Node::Datatable
Node::DatatableColumn
Node::Delete
Node::Emphasis
Node::Figure
Node::Heading
Node::ImageObject
Node::Integer
Node::Link
Node::List
Node::MathBlock
Node::MathFragment
Node::NontextualAnnotation
Node::Note
Node::Null
Node::Number
Node::Object
Node::Paragraph
Node::Parameter
Node::Quote
Node::QuoteBlock
Node::String
Node::Strong
Node::Subscript
Node::Superscript
Node::Table
Node::ThematicBreak
Node::VideoObject
);
|
random_line_split
|
|
settings.rs
|
use ProtocolEngineBuilder;
use Protocol;
pub trait OptionSetter<T> {
fn set_option(self, T) -> T;
}
#[derive(Clone,Copy,Debug)]
pub struct Bytes(pub usize);
#[derive(Clone,Copy,Debug)]
pub struct Kilobytes(pub usize);
#[derive(Clone,Copy,Debug)]
pub struct Megabytes(pub usize);
pub trait ToBytes {
fn to_bytes(&self) -> Bytes;
}
impl ToBytes for Bytes {
fn to_bytes(&self) -> Bytes {
*self
}
}
impl ToBytes for Kilobytes {
fn to_bytes(&self) -> Bytes {
let Kilobytes(kb) = *self;
Bytes(kb * 1_000)
}
}
impl ToBytes for Megabytes {
fn to_bytes(&self) -> Bytes {
let Megabytes(mb) = *self;
Bytes(mb * 1_000_000)
}
}
pub struct InitialBufferSize<T>(pub T) where T: ToBytes;
pub struct
|
(pub usize);
pub struct MaxBufferPoolSize(pub usize);
impl <P, T> OptionSetter<ProtocolEngineBuilder<P>> for InitialBufferSize<T> where P: Protocol, T: ToBytes {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let InitialBufferSize(size) = self;
let number_of_bytes: Bytes = size.to_bytes();
builder.starting_buffer_size = number_of_bytes;
builder
}
}
impl <P> OptionSetter<ProtocolEngineBuilder<P>> for InitialBufferPoolSize where P: Protocol {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let InitialBufferPoolSize(number_of_buffers) = self;
builder.buffer_pool_size = number_of_buffers;
builder
}
}
impl <P> OptionSetter<ProtocolEngineBuilder<P>> for MaxBufferPoolSize where P: Protocol {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let MaxBufferPoolSize(number_of_buffers) = self;
builder.max_buffer_pool_size = number_of_buffers;
builder
}
}
|
InitialBufferPoolSize
|
identifier_name
|
settings.rs
|
use ProtocolEngineBuilder;
use Protocol;
pub trait OptionSetter<T> {
fn set_option(self, T) -> T;
}
#[derive(Clone,Copy,Debug)]
pub struct Bytes(pub usize);
#[derive(Clone,Copy,Debug)]
pub struct Kilobytes(pub usize);
#[derive(Clone,Copy,Debug)]
pub struct Megabytes(pub usize);
pub trait ToBytes {
fn to_bytes(&self) -> Bytes;
}
impl ToBytes for Bytes {
fn to_bytes(&self) -> Bytes {
*self
}
}
impl ToBytes for Kilobytes {
fn to_bytes(&self) -> Bytes {
let Kilobytes(kb) = *self;
Bytes(kb * 1_000)
}
}
impl ToBytes for Megabytes {
fn to_bytes(&self) -> Bytes {
let Megabytes(mb) = *self;
Bytes(mb * 1_000_000)
}
}
pub struct InitialBufferSize<T>(pub T) where T: ToBytes;
pub struct InitialBufferPoolSize(pub usize);
pub struct MaxBufferPoolSize(pub usize);
impl <P, T> OptionSetter<ProtocolEngineBuilder<P>> for InitialBufferSize<T> where P: Protocol, T: ToBytes {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let InitialBufferSize(size) = self;
let number_of_bytes: Bytes = size.to_bytes();
builder.starting_buffer_size = number_of_bytes;
builder
}
}
impl <P> OptionSetter<ProtocolEngineBuilder<P>> for InitialBufferPoolSize where P: Protocol {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let InitialBufferPoolSize(number_of_buffers) = self;
builder.buffer_pool_size = number_of_buffers;
builder
}
}
impl <P> OptionSetter<ProtocolEngineBuilder<P>> for MaxBufferPoolSize where P: Protocol {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let MaxBufferPoolSize(number_of_buffers) = self;
builder.max_buffer_pool_size = number_of_buffers;
|
builder
}
}
|
random_line_split
|
|
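A minimal sketch exercising the ToBytes conversions from settings.rs above; it assumes Bytes, Kilobytes, Megabytes and ToBytes are in scope from that module, and exists only to illustrate the scaling factors.
fn demo_to_bytes() {
    // Kilobytes and Megabytes convert down to Bytes with fixed factors.
    let Bytes(from_kb) = Kilobytes(4).to_bytes();
    let Bytes(from_mb) = Megabytes(2).to_bytes();
    assert_eq!(from_kb, 4_000);
    assert_eq!(from_mb, 2_000_000);
}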
lib.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name = "rustc_borrowck"]
#![unstable]
#![staged_api]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(quote)]
|
#![feature(rustc_diagnostic_macros)]
#![allow(unknown_features)] #![feature(int_uint)]
#![allow(non_camel_case_types)]
#![allow(unstable)]
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
// for "clarity", rename the graphviz crate to dot; graphviz within `borrowck`
// refers to the borrowck-specific graphviz adapter traits.
extern crate "graphviz" as dot;
extern crate rustc;
pub use borrowck::check_crate;
pub use borrowck::build_borrowck_dataflow_data_for_fn;
pub use borrowck::FnPartsWithCFG;
mod borrowck;
pub mod graphviz;
|
#![feature(slicing_syntax, unsafe_destructor)]
|
random_line_split
|
main.rs
|
extern crate coio;
use std::io::{Read, Write};
use coio::net::TcpListener;
use coio::{spawn, run};
fn
|
() {
// Spawn a coroutine for accepting new connections
let mut x: i64 = 11;
spawn(move|| {
let acceptor = TcpListener::bind("127.0.0.1:4000").unwrap();
println!("Waiting for connection...");
for stream in acceptor.incoming() {
let mut stream = stream.unwrap();
x += 1;
//println!("Got connection from {:?}", stream.peer_addr().unwrap());
// Spawn a new coroutine to handle the connection
spawn(move|| {
let mut buf = [0; 1024];
x += 1;
loop {
match stream.read(&mut buf) {
Ok(0) => {
//println!("EOF");
break;
},
Ok(len) => {
//println!("Read {} bytes, echo back", len);
stream.write_all(&buf[0..len]).unwrap();
},
Err(err) => {
//println!("Error occurs: {:?}", err);
break;
}
}
}
//println!("Client closed");
});
}
});
// Schedule with 4 threads
run(4);
}
|
main
|
identifier_name
|
main.rs
|
extern crate coio;
use std::io::{Read, Write};
use coio::net::TcpListener;
use coio::{spawn, run};
fn main() {
// Spawn a coroutine for accepting new connections
let mut x: i64 = 11;
spawn(move|| {
let acceptor = TcpListener::bind("127.0.0.1:4000").unwrap();
println!("Waiting for connection...");
for stream in acceptor.incoming() {
let mut stream = stream.unwrap();
x += 1;
//println!("Got connection from {:?}", stream.peer_addr().unwrap());
// Spawn a new coroutine to handle the connection
spawn(move|| {
let mut buf = [0; 1024];
x += 1;
loop {
match stream.read(&mut buf) {
Ok(0) => {
//println!("EOF");
break;
},
Ok(len) => {
//println!("Read {} bytes, echo back", len);
stream.write_all(&buf[0..len]).unwrap();
},
Err(err) => {
//println!("Error occurs: {:?}", err);
break;
}
}
}
//println!("Client closed");
});
}
});
|
// Schedule with 4 threads
run(4);
}
|
random_line_split
|
|
main.rs
|
extern crate coio;
use std::io::{Read, Write};
use coio::net::TcpListener;
use coio::{spawn, run};
fn main()
|
match stream.read(&mut buf) {
Ok(0) => {
//println!("EOF");
break;
},
Ok(len) => {
//println!("Read {} bytes, echo back", len);
stream.write_all(&buf[0..len]).unwrap();
},
Err(err) => {
//println!("Error occurs: {:?}", err);
break;
}
}
}
//println!("Client closed");
});
}
});
// Schedule with 4 threads
run(4);
}
|
{
// Spawn a coroutine for accepting new connections
let mut x: i64 = 11;
spawn(move|| {
let acceptor = TcpListener::bind("127.0.0.1:4000").unwrap();
println!("Waiting for connection ...");
for stream in acceptor.incoming() {
let mut stream = stream.unwrap();
x += 1;
//println!("Got connection from {:?}", stream.peer_addr().unwrap());
// Spawn a new coroutine to handle the connection
spawn(move|| {
let mut buf = [0; 1024];
x += 1;
loop {
|
identifier_body
|
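A hypothetical client for the coio echo server above, written against the standard library only (coio is not needed on the client side); the address matches the listener in main.rs, everything else is illustrative.
use std::io::{Read, Write};
use std::net::TcpStream;

fn echo_once() -> std::io::Result<()> {
    // Connect to the server spawned in main.rs and verify it echoes bytes back.
    let mut stream = TcpStream::connect("127.0.0.1:4000")?;
    stream.write_all(b"hello")?;
    let mut buf = [0u8; 5];
    stream.read_exact(&mut buf)?;
    assert_eq!(&buf, b"hello");
    Ok(())
}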
singleton.rs
|
#![macro_use]
/**
 * Thanks to http://stackoverflow.com/questions/27791532/how-do-i-create-a-global-mutable-singleton
*/
use std::sync::{Arc, Mutex, MutexGuard, LockResult};
//#[derive(Copy)]
pub struct SingletonHolder<T> {
// Since we will be used in many threads, we need to protect
// concurrent access
inner: Arc<Mutex<T>>,
}
impl<T> SingletonHolder<T> {
pub fn new(mutex: Arc<Mutex<T>>) -> SingletonHolder<T> {
SingletonHolder {
inner: mutex,
}
}
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
|
impl <T> Clone for SingletonHolder<T> {
fn clone(&self) -> SingletonHolder<T> {
SingletonHolder {
inner: self.inner.clone(),
}
}
}
#[macro_export]
macro_rules! declare_singleton {
(
$name: ident, // Function name
$t: ty, // Embedded type
$init: expr // Initial value
) => (
fn $name() -> $crate::singleton::SingletonHolder<$t> {
static mut SINGLETON: *const $crate::singleton::SingletonHolder<$t> = 0 as *const $crate::singleton::SingletonHolder<$t>;
static ONCE: ::std::sync::Once = ::std::sync::ONCE_INIT;
unsafe {
ONCE.call_once(|| {
let singleton = $crate::singleton::SingletonHolder::new(::std::sync::Arc::new(::std::sync::Mutex::new($init)));
// Put it in the heap so it can outlive this call
SINGLETON = ::std::mem::transmute(Box::new(singleton));
// Make sure to free heap memory at exit
/* This doesn't exist in stable 1.0, so we will just leak it!
rt::at_exit(|| {
let singleton: Box<SingletonHolder> = mem::transmute(SINGLETON);
                    // Let's explicitly free the memory for this example
drop(singleton);
// Set it to null again. I hope only one thread can call `at_exit`!
SINGLETON = 0 as *const _;
});
*/
});
(*SINGLETON).clone()
}
}
)
}
#[cfg(test)]
mod test {
#[test]
fn smoke_test() {
declare_singleton!(simple_singleton, u32, 0);
let simple = simple_singleton();
match simple.lock() {
Ok(_) => {}
Err(_) => {}
};
}
}
|
self.inner.lock()
}
}
|
identifier_body
|
singleton.rs
|
#![macro_use]
/**
 * Thanks to http://stackoverflow.com/questions/27791532/how-do-i-create-a-global-mutable-singleton
*/
use std::sync::{Arc, Mutex, MutexGuard, LockResult};
//#[derive(Copy)]
pub struct SingletonHolder<T> {
// Since we will be used in many threads, we need to protect
// concurrent access
inner: Arc<Mutex<T>>,
}
impl<T> SingletonHolder<T> {
pub fn new(mutex: Arc<Mutex<T>>) -> SingletonHolder<T> {
SingletonHolder {
inner: mutex,
}
}
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
self.inner.lock()
}
}
impl <T> Clone for SingletonHolder<T> {
fn clone(&self) -> SingletonHolder<T> {
SingletonHolder {
inner: self.inner.clone(),
}
}
}
#[macro_export]
macro_rules! declare_singleton {
(
$name: ident, // Function name
$t: ty, // Embedded type
$init: expr // Initial value
) => (
fn $name() -> $crate::singleton::SingletonHolder<$t> {
static mut SINGLETON: *const $crate::singleton::SingletonHolder<$t> = 0 as *const $crate::singleton::SingletonHolder<$t>;
static ONCE: ::std::sync::Once = ::std::sync::ONCE_INIT;
unsafe {
ONCE.call_once(|| {
let singleton = $crate::singleton::SingletonHolder::new(::std::sync::Arc::new(::std::sync::Mutex::new($init)));
// Put it in the heap so it can outlive this call
SINGLETON = ::std::mem::transmute(Box::new(singleton));
// Make sure to free heap memory at exit
/* This doesn't exist in stable 1.0, so we will just leak it!
rt::at_exit(|| {
let singleton: Box<SingletonHolder> = mem::transmute(SINGLETON);
                    // Let's explicitly free the memory for this example
drop(singleton);
// Set it to null again. I hope only one thread can call `at_exit`!
SINGLETON = 0 as *const _;
});
*/
});
(*SINGLETON).clone()
}
}
)
}
#[cfg(test)]
mod test {
#[test]
fn smoke_test() {
declare_singleton!(simple_singleton, u32, 0);
let simple = simple_singleton();
match simple.lock() {
Ok(_) => {}
Err(_) => {}
|
};
}
}
|
conditional_block
|
|
singleton.rs
|
#![macro_use]
/**
 * Thanks to http://stackoverflow.com/questions/27791532/how-do-i-create-a-global-mutable-singleton
*/
use std::sync::{Arc, Mutex, MutexGuard, LockResult};
//#[derive(Copy)]
pub struct SingletonHolder<T> {
// Since we will be used in many threads, we need to protect
// concurrent access
inner: Arc<Mutex<T>>,
}
impl<T> SingletonHolder<T> {
pub fn ne
|
utex: Arc<Mutex<T>>) -> SingletonHolder<T> {
SingletonHolder {
inner: mutex,
}
}
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
self.inner.lock()
}
}
impl <T> Clone for SingletonHolder<T> {
fn clone(&self) -> SingletonHolder<T> {
SingletonHolder {
inner: self.inner.clone(),
}
}
}
#[macro_export]
macro_rules! declare_singleton {
(
$name: ident, // Function name
$t: ty, // Embedded type
$init: expr // Initial value
) => (
fn $name() -> $crate::singleton::SingletonHolder<$t> {
static mut SINGLETON: *const $crate::singleton::SingletonHolder<$t> = 0 as *const $crate::singleton::SingletonHolder<$t>;
static ONCE: ::std::sync::Once = ::std::sync::ONCE_INIT;
unsafe {
ONCE.call_once(|| {
let singleton = $crate::singleton::SingletonHolder::new(::std::sync::Arc::new(::std::sync::Mutex::new($init)));
// Put it in the heap so it can outlive this call
SINGLETON = ::std::mem::transmute(Box::new(singleton));
// Make sure to free heap memory at exit
/* This doesn't exist in stable 1.0, so we will just leak it!
rt::at_exit(|| {
let singleton: Box<SingletonHolder> = mem::transmute(SINGLETON);
                    // Let's explicitly free the memory for this example
drop(singleton);
// Set it to null again. I hope only one thread can call `at_exit`!
SINGLETON = 0 as *const _;
});
*/
});
(*SINGLETON).clone()
}
}
)
}
#[cfg(test)]
mod test {
#[test]
fn smoke_test() {
declare_singleton!(simple_singleton, u32, 0);
let simple = simple_singleton();
match simple.lock() {
Ok(_) => {}
Err(_) => {}
};
}
}
|
w(m
|
identifier_name
|
singleton.rs
|
#![macro_use]
/**
|
use std::sync::{Arc, Mutex, MutexGuard, LockResult};
//#[derive(Copy)]
pub struct SingletonHolder<T> {
// Since we will be used in many threads, we need to protect
// concurrent access
inner: Arc<Mutex<T>>,
}
impl<T> SingletonHolder<T> {
pub fn new(mutex: Arc<Mutex<T>>) -> SingletonHolder<T> {
SingletonHolder {
inner: mutex,
}
}
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
self.inner.lock()
}
}
impl <T> Clone for SingletonHolder<T> {
fn clone(&self) -> SingletonHolder<T> {
SingletonHolder {
inner: self.inner.clone(),
}
}
}
#[macro_export]
macro_rules! declare_singleton {
(
$name: ident, // Function name
$t: ty, // Embedded type
$init: expr // Initial value
) => (
fn $name() -> $crate::singleton::SingletonHolder<$t> {
static mut SINGLETON: *const $crate::singleton::SingletonHolder<$t> = 0 as *const $crate::singleton::SingletonHolder<$t>;
static ONCE: ::std::sync::Once = ::std::sync::ONCE_INIT;
unsafe {
ONCE.call_once(|| {
let singleton = $crate::singleton::SingletonHolder::new(::std::sync::Arc::new(::std::sync::Mutex::new($init)));
// Put it in the heap so it can outlive this call
SINGLETON = ::std::mem::transmute(Box::new(singleton));
// Make sure to free heap memory at exit
/* This doesn't exist in stable 1.0, so we will just leak it!
rt::at_exit(|| {
let singleton: Box<SingletonHolder> = mem::transmute(SINGLETON);
                    // Let's explicitly free the memory for this example
drop(singleton);
// Set it to null again. I hope only one thread can call `at_exit`!
SINGLETON = 0 as *const _;
});
*/
});
(*SINGLETON).clone()
}
}
)
}
#[cfg(test)]
mod test {
#[test]
fn smoke_test() {
declare_singleton!(simple_singleton, u32, 0);
let simple = simple_singleton();
match simple.lock() {
Ok(_) => {}
Err(_) => {}
};
}
}
|
 * Thanks to http://stackoverflow.com/questions/27791532/how-do-i-create-a-global-mutable-singleton
*/
|
random_line_split
|
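A usage sketch for the declare_singleton! macro defined above; the function name and the counter logic are illustrative, only the macro, SingletonHolder and lock() come from singleton.rs.
declare_singleton!(request_count, u32, 0);

fn bump_request_count() {
    // Every call to request_count() returns a clone of the same Arc<Mutex<u32>>.
    let holder = request_count();
    if let Ok(mut guard) = holder.lock() {
        *guard += 1;
    }
}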
maxpd.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn maxpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 245], OperandSize::Dword)
}
fn maxpd_2()
|
fn maxpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM4)), operand2: Some(Direct(XMM6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 230], OperandSize::Qword)
}
fn maxpd_4() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexed(RCX, RDI, Eight, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 4, 249], OperandSize::Qword)
}
|
{
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM2)), operand2: Some(IndirectScaledIndexedDisplaced(EDX, ESI, Two, 550510135, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 148, 114, 55, 30, 208, 32], OperandSize::Dword)
}
|
identifier_body
|
maxpd.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
|
fn maxpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 245], OperandSize::Dword)
}
fn maxpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM2)), operand2: Some(IndirectScaledIndexedDisplaced(EDX, ESI, Two, 550510135, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 148, 114, 55, 30, 208, 32], OperandSize::Dword)
}
fn maxpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM4)), operand2: Some(Direct(XMM6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 230], OperandSize::Qword)
}
fn maxpd_4() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexed(RCX, RDI, Eight, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 4, 249], OperandSize::Qword)
}
|
use ::RegScale::*;
|
random_line_split
|
maxpd.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn
|
() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 245], OperandSize::Dword)
}
fn maxpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM2)), operand2: Some(IndirectScaledIndexedDisplaced(EDX, ESI, Two, 550510135, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 148, 114, 55, 30, 208, 32], OperandSize::Dword)
}
fn maxpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM4)), operand2: Some(Direct(XMM6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 230], OperandSize::Qword)
}
fn maxpd_4() {
run_test(&Instruction { mnemonic: Mnemonic::MAXPD, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexed(RCX, RDI, Eight, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 95, 4, 249], OperandSize::Qword)
}
|
maxpd_1
|
identifier_name
|
tokenstream.rs
|
: Send + Sync,
{
}
impl TokenTree {
/// Checks if this `TokenTree` is equal to the other, regardless of span information.
pub fn eq_unspanned(&self, other: &TokenTree) -> bool {
match (self, other) {
(TokenTree::Token(token), TokenTree::Token(token2)) => token.kind == token2.kind,
(TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
delim == delim2 && tts.eq_unspanned(&tts2)
}
_ => false,
}
}
/// Retrieves the `TokenTree`'s span.
pub fn span(&self) -> Span {
match self {
TokenTree::Token(token) => token.span,
            TokenTree::Delimited(sp, ..) => sp.entire(),
}
}
/// Modify the `TokenTree`'s span in-place.
pub fn set_span(&mut self, span: Span) {
match self {
TokenTree::Token(token) => token.span = span,
            TokenTree::Delimited(dspan, ..) => *dspan = DelimSpan::from_single(span),
}
}
pub fn token(kind: TokenKind, span: Span) -> TokenTree {
TokenTree::Token(Token::new(kind, span))
}
/// Returns the opening delimiter as a token tree.
pub fn open_tt(span: DelimSpan, delim: DelimToken) -> TokenTree {
TokenTree::token(token::OpenDelim(delim), span.open)
}
/// Returns the closing delimiter as a token tree.
pub fn close_tt(span: DelimSpan, delim: DelimToken) -> TokenTree {
TokenTree::token(token::CloseDelim(delim), span.close)
}
pub fn uninterpolate(self) -> TokenTree {
match self {
TokenTree::Token(token) => TokenTree::Token(token.uninterpolate().into_owned()),
tt => tt,
}
}
}
impl<CTX> HashStable<CTX> for TokenStream
where
CTX: crate::HashStableContext,
{
fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
for sub_tt in self.trees() {
sub_tt.hash_stable(hcx, hasher);
}
}
}
pub trait CreateTokenStream: sync::Send + sync::Sync {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream;
}
impl CreateTokenStream for AttrAnnotatedTokenStream {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.clone()
}
}
/// A lazy version of [`TokenStream`], which defers creation
/// of an actual `TokenStream` until it is needed.
/// `Box` is here only to reduce the structure size.
#[derive(Clone)]
pub struct LazyTokenStream(Lrc<Box<dyn CreateTokenStream>>);
impl LazyTokenStream {
    pub fn new(inner: impl CreateTokenStream + 'static) -> LazyTokenStream {
LazyTokenStream(Lrc::new(Box::new(inner)))
}
pub fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.0.create_token_stream()
}
}
impl fmt::Debug for LazyTokenStream {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "LazyTokenStream({:?})", self.create_token_stream())
}
}
impl<S: Encoder> Encodable<S> for LazyTokenStream {
fn encode(&self, s: &mut S) -> Result<(), S::Error> {
// Used by AST json printing.
Encodable::encode(&self.create_token_stream(), s)
}
}
impl<D: Decoder> Decodable<D> for LazyTokenStream {
fn decode(_d: &mut D) -> Result<Self, D::Error> {
panic!("Attempted to decode LazyTokenStream");
}
}
impl<CTX> HashStable<CTX> for LazyTokenStream {
fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
panic!("Attempted to compute stable hash for LazyTokenStream");
}
}
/// A `AttrAnnotatedTokenStream` is similar to a `TokenStream`, but with extra
/// information about the tokens for attribute targets. This is used
/// during expansion to perform early cfg-expansion, and to process attributes
/// during proc-macro invocations.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct AttrAnnotatedTokenStream(pub Lrc<Vec<(AttrAnnotatedTokenTree, Spacing)>>);
/// Like `TokenTree`, but for `AttrAnnotatedTokenStream`
#[derive(Clone, Debug, Encodable, Decodable)]
pub enum AttrAnnotatedTokenTree {
Token(Token),
Delimited(DelimSpan, DelimToken, AttrAnnotatedTokenStream),
/// Stores the attributes for an attribute target,
/// along with the tokens for that attribute target.
/// See `AttributesData` for more information
Attributes(AttributesData),
}
impl AttrAnnotatedTokenStream {
pub fn new(tokens: Vec<(AttrAnnotatedTokenTree, Spacing)>) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream(Lrc::new(tokens))
}
    /// Converts this `AttrAnnotatedTokenStream` to a plain `TokenStream`.
/// During conversion, `AttrAnnotatedTokenTree::Attributes` get 'flattened'
/// back to a `TokenStream` of the form `outer_attr attr_target`.
/// If there are inner attributes, they are inserted into the proper
/// place in the attribute target tokens.
pub fn to_tokenstream(&self) -> TokenStream {
let trees: Vec<_> = self
.0
.iter()
.flat_map(|tree| match &tree.0 {
AttrAnnotatedTokenTree::Token(inner) => {
smallvec![(TokenTree::Token(inner.clone()), tree.1)].into_iter()
}
AttrAnnotatedTokenTree::Delimited(span, delim, stream) => smallvec![(
TokenTree::Delimited(*span, *delim, stream.to_tokenstream()),
tree.1,
)]
.into_iter(),
AttrAnnotatedTokenTree::Attributes(data) => {
let mut outer_attrs = Vec::new();
let mut inner_attrs = Vec::new();
for attr in &data.attrs {
match attr.style {
crate::AttrStyle::Outer => {
outer_attrs.push(attr);
}
crate::AttrStyle::Inner => {
inner_attrs.push(attr);
}
}
}
let mut target_tokens: Vec<_> = data
.tokens
.create_token_stream()
.to_tokenstream()
.0
.iter()
.cloned()
.collect();
                    if !inner_attrs.is_empty() {
let mut found = false;
// Check the last two trees (to account for a trailing semi)
for (tree, _) in target_tokens.iter_mut().rev().take(2) {
if let TokenTree::Delimited(span, delim, delim_tokens) = tree {
// Inner attributes are only supported on extern blocks, functions, impls,
// and modules. All of these have their inner attributes placed at
// the beginning of the rightmost outermost braced group:
                            // e.g. fn foo() { #![my_attr] }
//
// Therefore, we can insert them back into the right location
// without needing to do any extra position tracking.
//
// Note: Outline modules are an exception - they can
// have attributes like `#![my_attr]` at the start of a file.
// Support for custom attributes in this position is not
// properly implemented - we always synthesize fake tokens,
// so we never reach this code.
let mut builder = TokenStreamBuilder::new();
for inner_attr in inner_attrs {
builder.push(inner_attr.tokens().to_tokenstream());
}
builder.push(delim_tokens.clone());
*tree = TokenTree::Delimited(*span, *delim, builder.build());
found = true;
break;
}
}
assert!(
found,
"Failed to find trailing delimited group in: {:?}",
target_tokens
);
}
let mut flat: SmallVec<[_; 1]> = SmallVec::new();
for attr in outer_attrs {
// FIXME: Make this more efficient
flat.extend(attr.tokens().to_tokenstream().0.clone().iter().cloned());
}
flat.extend(target_tokens);
flat.into_iter()
}
})
.collect();
TokenStream::new(trees)
}
}
/// Stores the tokens for an attribute target, along
/// with its attributes.
///
/// This is constructed during parsing when we need to capture
/// tokens.
///
/// For example, `#[cfg(FALSE)] struct Foo {}` would
/// have an `attrs` field containing the `#[cfg(FALSE)]` attr,
/// and a `tokens` field storing the (unparsed) tokens `struct Foo {}`
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct AttributesData {
/// Attributes, both outer and inner.
/// These are stored in the original order that they were parsed in.
pub attrs: AttrVec,
/// The underlying tokens for the attribute target that `attrs`
/// are applied to
pub tokens: LazyTokenStream,
}
/// A `TokenStream` is an abstract sequence of tokens, organized into [`TokenTree`]s.
///
/// The goal is for procedural macros to work with `TokenStream`s and `TokenTree`s
/// instead of a representation of the abstract syntax tree.
/// Today's `TokenTree`s can still contain AST via `token::Interpolated` for
/// backwards compatibility.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct TokenStream(pub(crate) Lrc<Vec<TreeAndSpacing>>);
pub type TreeAndSpacing = (TokenTree, Spacing);
// `TokenStream` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(TokenStream, 8);
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable)]
pub enum Spacing {
Alone,
Joint,
}
impl TokenStream {
/// Given a `TokenStream` with a `Stream` of only two arguments, return a new `TokenStream`
/// separating the two arguments with a comma for diagnostic suggestions.
pub fn add_comma(&self) -> Option<(TokenStream, Span)> {
// Used to suggest if a user writes `foo!(a b);`
let mut suggestion = None;
let mut iter = self.0.iter().enumerate().peekable();
while let Some((pos, ts)) = iter.next() {
if let Some((_, next)) = iter.peek() {
let sp = match (&ts, &next) {
                    (_, (TokenTree::Token(Token { kind: token::Comma, .. }), _)) => continue,
(
(TokenTree::Token(token_left), Spacing::Alone),
(TokenTree::Token(token_right), _),
                    ) if ((token_left.is_ident() && !token_left.is_reserved_ident())
                        || token_left.is_lit())
                        && ((token_right.is_ident() && !token_right.is_reserved_ident())
|| token_right.is_lit()) =>
{
token_left.span
}
                    ((TokenTree::Delimited(sp, ..), Spacing::Alone), _) => sp.entire(),
_ => continue,
};
let sp = sp.shrink_to_hi();
let comma = (TokenTree::token(token::Comma, sp), Spacing::Alone);
suggestion = Some((pos, comma, sp));
}
}
if let Some((pos, comma, sp)) = suggestion {
let mut new_stream = Vec::with_capacity(self.0.len() + 1);
let parts = self.0.split_at(pos + 1);
new_stream.extend_from_slice(parts.0);
new_stream.push(comma);
new_stream.extend_from_slice(parts.1);
return Some((TokenStream::new(new_stream), sp));
}
None
}
}
impl From<(AttrAnnotatedTokenTree, Spacing)> for AttrAnnotatedTokenStream {
fn from((tree, spacing): (AttrAnnotatedTokenTree, Spacing)) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream::new(vec![(tree, spacing)])
}
}
impl From<TokenTree> for TokenStream {
fn from(tree: TokenTree) -> TokenStream {
TokenStream::new(vec![(tree, Spacing::Alone)])
}
}
impl From<TokenTree> for TreeAndSpacing {
fn from(tree: TokenTree) -> TreeAndSpacing {
(tree, Spacing::Alone)
}
}
impl iter::FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(iter: I) -> Self {
TokenStream::new(iter.into_iter().map(Into::into).collect::<Vec<TreeAndSpacing>>())
}
}
impl Eq for TokenStream {}
impl PartialEq<TokenStream> for TokenStream {
fn eq(&self, other: &TokenStream) -> bool {
self.trees().eq(other.trees())
}
}
impl TokenStream {
pub fn new(streams: Vec<TreeAndSpacing>) -> TokenStream {
TokenStream(Lrc::new(streams))
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn len(&self) -> usize
|
pub fn from_streams(mut streams: SmallVec<[TokenStream; 2]>) -> TokenStream {
match streams.len() {
0 => TokenStream::default(),
1 => streams.pop().unwrap(),
_ => {
// We are going to extend the first stream in `streams` with
// the elements from the subsequent streams. This requires
// using `make_mut()` on the first stream, and in practice this
// doesn't cause cloning 99.9% of the time.
//
// One very common use case is when `streams` has two elements,
// where the first stream has any number of elements within
// (often 1, but sometimes many more) and the second stream has
// a single element within.
// Determine how much the first stream will be extended.
// Needed to avoid quadratic blow up from on-the-fly
// reallocations (#57735).
let num_appends = streams.iter().skip(1).map(|ts| ts.len()).sum();
// Get the first stream. If it's `None`, create an empty
// stream.
let mut iter = streams.drain(..);
let mut first_stream_lrc = iter.next().unwrap().0;
// Append the elements to the first stream, after reserving
// space for them.
let first_vec_mut = Lrc::make_mut(&mut first_stream_lrc);
first_vec_mut.reserve(num_appends);
for stream in iter {
first_vec_mut.extend(stream.0.iter().cloned());
}
// Create the final `TokenStream`.
TokenStream(first_stream_lrc)
}
}
}
pub fn trees(&self) -> Cursor {
self.clone().into_trees()
}
pub fn into_trees(self) -> Cursor {
Cursor::new(self)
}
/// Compares two `TokenStream`s, checking equality without regarding span information.
pub fn eq_unspanned(&self, other: &TokenStream) -> bool {
let mut t1 = self.trees();
let mut t2 = other.trees();
for (t1, t2) in iter::zip(&mut t1, &mut t2) {
            if !t1.eq_unspanned(&t2) {
return false;
}
}
t1.next().is_none() && t2.next().is_none()
}
pub fn map_enumerated<F: FnMut(usize, &TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
TokenStream(Lrc::new(
self.0
.iter()
.enumerate()
.map(|(i, (tree, is_joint))| (f(i, tree), *is_joint))
.collect(),
))
}
}
// 99.5%+ of the time we have 1 or 2 elements in this vector.
#[derive(Clone)]
pub struct TokenStreamBuilder(SmallVec<[TokenStream; 2]>);
impl TokenStreamBuilder {
pub fn new() -> TokenStreamBuilder {
TokenStreamBuilder(SmallVec::new())
}
pub fn push<T: Into<TokenStream>>(&mut self, stream: T) {
let mut stream = stream.into();
// If `self` is not empty and the last tree within the last stream is a
// token tree marked with `Joint`...
if let Some(TokenStream(ref mut last_stream_lrc)) = self.0.last_mut() {
if let Some((TokenTree::Token(last_token), Spacing::Joint)) = last_stream_lrc.last() {
//...and `stream` is not empty and the first tree within it is
// a token tree...
let TokenStream(ref mut stream_lrc) = stream;
if let Some((TokenTree::Token(token), spacing)) = stream_lrc.first() {
//...and the two tokens can be glued together...
if let Some(glued_tok) = last_token.glue(&token) {
//...then do so, by overwriting the last token
// tree in `self` and removing the first token tree
// from `stream`. This requires using `make_mut()`
// on the last stream in `self` and on `stream`,
// and in practice this doesn't cause cloning 99.9%
// of the time.
// Overwrite the last token tree with the merged
// token.
let last_vec_mut = Lrc::make_mut(last_stream_lrc);
*last_vec_mut.last_mut().unwrap() = (TokenTree::Token(glued
|
{
self.0.len()
}
|
identifier_body
|
tokenstream.rs
|
TokenStream {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.clone()
}
}
/// A lazy version of [`TokenStream`], which defers creation
/// of an actual `TokenStream` until it is needed.
/// `Box` is here only to reduce the structure size.
#[derive(Clone)]
pub struct LazyTokenStream(Lrc<Box<dyn CreateTokenStream>>);
impl LazyTokenStream {
    pub fn new(inner: impl CreateTokenStream + 'static) -> LazyTokenStream {
LazyTokenStream(Lrc::new(Box::new(inner)))
}
pub fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.0.create_token_stream()
}
}
impl fmt::Debug for LazyTokenStream {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "LazyTokenStream({:?})", self.create_token_stream())
}
}
impl<S: Encoder> Encodable<S> for LazyTokenStream {
fn encode(&self, s: &mut S) -> Result<(), S::Error> {
// Used by AST json printing.
Encodable::encode(&self.create_token_stream(), s)
}
}
impl<D: Decoder> Decodable<D> for LazyTokenStream {
fn decode(_d: &mut D) -> Result<Self, D::Error> {
panic!("Attempted to decode LazyTokenStream");
}
}
impl<CTX> HashStable<CTX> for LazyTokenStream {
fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
panic!("Attempted to compute stable hash for LazyTokenStream");
}
}
/// A `AttrAnnotatedTokenStream` is similar to a `TokenStream`, but with extra
/// information about the tokens for attribute targets. This is used
/// during expansion to perform early cfg-expansion, and to process attributes
/// during proc-macro invocations.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct AttrAnnotatedTokenStream(pub Lrc<Vec<(AttrAnnotatedTokenTree, Spacing)>>);
/// Like `TokenTree`, but for `AttrAnnotatedTokenStream`
#[derive(Clone, Debug, Encodable, Decodable)]
pub enum AttrAnnotatedTokenTree {
Token(Token),
Delimited(DelimSpan, DelimToken, AttrAnnotatedTokenStream),
/// Stores the attributes for an attribute target,
/// along with the tokens for that attribute target.
/// See `AttributesData` for more information
Attributes(AttributesData),
}
impl AttrAnnotatedTokenStream {
pub fn new(tokens: Vec<(AttrAnnotatedTokenTree, Spacing)>) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream(Lrc::new(tokens))
}
    /// Converts this `AttrAnnotatedTokenStream` to a plain `TokenStream`.
/// During conversion, `AttrAnnotatedTokenTree::Attributes` get 'flattened'
/// back to a `TokenStream` of the form `outer_attr attr_target`.
/// If there are inner attributes, they are inserted into the proper
/// place in the attribute target tokens.
pub fn to_tokenstream(&self) -> TokenStream {
let trees: Vec<_> = self
.0
.iter()
.flat_map(|tree| match &tree.0 {
AttrAnnotatedTokenTree::Token(inner) => {
smallvec![(TokenTree::Token(inner.clone()), tree.1)].into_iter()
}
AttrAnnotatedTokenTree::Delimited(span, delim, stream) => smallvec![(
TokenTree::Delimited(*span, *delim, stream.to_tokenstream()),
tree.1,
)]
.into_iter(),
AttrAnnotatedTokenTree::Attributes(data) => {
let mut outer_attrs = Vec::new();
let mut inner_attrs = Vec::new();
for attr in &data.attrs {
match attr.style {
crate::AttrStyle::Outer => {
outer_attrs.push(attr);
}
crate::AttrStyle::Inner => {
inner_attrs.push(attr);
}
}
}
let mut target_tokens: Vec<_> = data
.tokens
.create_token_stream()
.to_tokenstream()
.0
.iter()
.cloned()
.collect();
                    if !inner_attrs.is_empty() {
let mut found = false;
// Check the last two trees (to account for a trailing semi)
for (tree, _) in target_tokens.iter_mut().rev().take(2) {
if let TokenTree::Delimited(span, delim, delim_tokens) = tree {
// Inner attributes are only supported on extern blocks, functions, impls,
// and modules. All of these have their inner attributes placed at
// the beginning of the rightmost outermost braced group:
                            // e.g. fn foo() { #![my_attr] }
//
// Therefore, we can insert them back into the right location
// without needing to do any extra position tracking.
//
// Note: Outline modules are an exception - they can
// have attributes like `#![my_attr]` at the start of a file.
// Support for custom attributes in this position is not
// properly implemented - we always synthesize fake tokens,
// so we never reach this code.
let mut builder = TokenStreamBuilder::new();
for inner_attr in inner_attrs {
builder.push(inner_attr.tokens().to_tokenstream());
}
builder.push(delim_tokens.clone());
*tree = TokenTree::Delimited(*span, *delim, builder.build());
found = true;
break;
}
}
assert!(
found,
"Failed to find trailing delimited group in: {:?}",
target_tokens
);
}
let mut flat: SmallVec<[_; 1]> = SmallVec::new();
for attr in outer_attrs {
// FIXME: Make this more efficient
flat.extend(attr.tokens().to_tokenstream().0.clone().iter().cloned());
}
flat.extend(target_tokens);
flat.into_iter()
}
})
.collect();
TokenStream::new(trees)
}
}
/// Stores the tokens for an attribute target, along
/// with its attributes.
///
/// This is constructed during parsing when we need to capture
/// tokens.
///
/// For example, `#[cfg(FALSE)] struct Foo {}` would
/// have an `attrs` field containing the `#[cfg(FALSE)]` attr,
/// and a `tokens` field storing the (unparsed) tokens `struct Foo {}`
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct AttributesData {
/// Attributes, both outer and inner.
/// These are stored in the original order that they were parsed in.
pub attrs: AttrVec,
/// The underlying tokens for the attribute target that `attrs`
/// are applied to
pub tokens: LazyTokenStream,
}
/// A `TokenStream` is an abstract sequence of tokens, organized into [`TokenTree`]s.
///
/// The goal is for procedural macros to work with `TokenStream`s and `TokenTree`s
/// instead of a representation of the abstract syntax tree.
/// Today's `TokenTree`s can still contain AST via `token::Interpolated` for
/// backwards compatibility.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct TokenStream(pub(crate) Lrc<Vec<TreeAndSpacing>>);
pub type TreeAndSpacing = (TokenTree, Spacing);
// `TokenStream` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(TokenStream, 8);
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable)]
pub enum Spacing {
Alone,
Joint,
}
impl TokenStream {
/// Given a `TokenStream` with a `Stream` of only two arguments, return a new `TokenStream`
/// separating the two arguments with a comma for diagnostic suggestions.
pub fn add_comma(&self) -> Option<(TokenStream, Span)> {
// Used to suggest if a user writes `foo!(a b);`
let mut suggestion = None;
let mut iter = self.0.iter().enumerate().peekable();
while let Some((pos, ts)) = iter.next() {
if let Some((_, next)) = iter.peek() {
let sp = match (&ts, &next) {
                    (_, (TokenTree::Token(Token { kind: token::Comma, .. }), _)) => continue,
(
(TokenTree::Token(token_left), Spacing::Alone),
(TokenTree::Token(token_right), _),
                    ) if ((token_left.is_ident() && !token_left.is_reserved_ident())
                        || token_left.is_lit())
                        && ((token_right.is_ident() && !token_right.is_reserved_ident())
|| token_right.is_lit()) =>
{
token_left.span
}
                    ((TokenTree::Delimited(sp, ..), Spacing::Alone), _) => sp.entire(),
_ => continue,
};
let sp = sp.shrink_to_hi();
let comma = (TokenTree::token(token::Comma, sp), Spacing::Alone);
suggestion = Some((pos, comma, sp));
}
}
if let Some((pos, comma, sp)) = suggestion {
let mut new_stream = Vec::with_capacity(self.0.len() + 1);
let parts = self.0.split_at(pos + 1);
new_stream.extend_from_slice(parts.0);
new_stream.push(comma);
new_stream.extend_from_slice(parts.1);
return Some((TokenStream::new(new_stream), sp));
}
None
}
}
impl From<(AttrAnnotatedTokenTree, Spacing)> for AttrAnnotatedTokenStream {
fn from((tree, spacing): (AttrAnnotatedTokenTree, Spacing)) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream::new(vec![(tree, spacing)])
}
}
impl From<TokenTree> for TokenStream {
fn from(tree: TokenTree) -> TokenStream {
TokenStream::new(vec![(tree, Spacing::Alone)])
}
}
impl From<TokenTree> for TreeAndSpacing {
fn from(tree: TokenTree) -> TreeAndSpacing {
(tree, Spacing::Alone)
}
}
impl iter::FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(iter: I) -> Self {
TokenStream::new(iter.into_iter().map(Into::into).collect::<Vec<TreeAndSpacing>>())
}
}
impl Eq for TokenStream {}
impl PartialEq<TokenStream> for TokenStream {
fn eq(&self, other: &TokenStream) -> bool {
self.trees().eq(other.trees())
}
}
impl TokenStream {
pub fn new(streams: Vec<TreeAndSpacing>) -> TokenStream {
TokenStream(Lrc::new(streams))
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn from_streams(mut streams: SmallVec<[TokenStream; 2]>) -> TokenStream {
match streams.len() {
0 => TokenStream::default(),
1 => streams.pop().unwrap(),
_ => {
// We are going to extend the first stream in `streams` with
// the elements from the subsequent streams. This requires
// using `make_mut()` on the first stream, and in practice this
// doesn't cause cloning 99.9% of the time.
//
// One very common use case is when `streams` has two elements,
// where the first stream has any number of elements within
// (often 1, but sometimes many more) and the second stream has
// a single element within.
// Determine how much the first stream will be extended.
// Needed to avoid quadratic blow up from on-the-fly
// reallocations (#57735).
let num_appends = streams.iter().skip(1).map(|ts| ts.len()).sum();
// Get the first stream. If it's `None`, create an empty
// stream.
let mut iter = streams.drain(..);
let mut first_stream_lrc = iter.next().unwrap().0;
// Append the elements to the first stream, after reserving
// space for them.
let first_vec_mut = Lrc::make_mut(&mut first_stream_lrc);
first_vec_mut.reserve(num_appends);
for stream in iter {
first_vec_mut.extend(stream.0.iter().cloned());
}
// Create the final `TokenStream`.
TokenStream(first_stream_lrc)
}
}
}
pub fn trees(&self) -> Cursor {
self.clone().into_trees()
}
pub fn into_trees(self) -> Cursor {
Cursor::new(self)
}
/// Compares two `TokenStream`s, checking equality without regarding span information.
pub fn eq_unspanned(&self, other: &TokenStream) -> bool {
let mut t1 = self.trees();
let mut t2 = other.trees();
for (t1, t2) in iter::zip(&mut t1, &mut t2) {
            if !t1.eq_unspanned(&t2) {
return false;
}
}
t1.next().is_none() && t2.next().is_none()
}
pub fn map_enumerated<F: FnMut(usize, &TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
TokenStream(Lrc::new(
self.0
.iter()
.enumerate()
.map(|(i, (tree, is_joint))| (f(i, tree), *is_joint))
.collect(),
))
}
}
// 99.5%+ of the time we have 1 or 2 elements in this vector.
#[derive(Clone)]
pub struct TokenStreamBuilder(SmallVec<[TokenStream; 2]>);
impl TokenStreamBuilder {
pub fn new() -> TokenStreamBuilder {
TokenStreamBuilder(SmallVec::new())
}
pub fn push<T: Into<TokenStream>>(&mut self, stream: T) {
let mut stream = stream.into();
// If `self` is not empty and the last tree within the last stream is a
// token tree marked with `Joint`...
if let Some(TokenStream(ref mut last_stream_lrc)) = self.0.last_mut() {
if let Some((TokenTree::Token(last_token), Spacing::Joint)) = last_stream_lrc.last() {
//...and `stream` is not empty and the first tree within it is
// a token tree...
let TokenStream(ref mut stream_lrc) = stream;
if let Some((TokenTree::Token(token), spacing)) = stream_lrc.first() {
//...and the two tokens can be glued together...
if let Some(glued_tok) = last_token.glue(&token) {
//...then do so, by overwriting the last token
// tree in `self` and removing the first token tree
// from `stream`. This requires using `make_mut()`
// on the last stream in `self` and on `stream`,
// and in practice this doesn't cause cloning 99.9%
// of the time.
// Overwrite the last token tree with the merged
// token.
let last_vec_mut = Lrc::make_mut(last_stream_lrc);
*last_vec_mut.last_mut().unwrap() = (TokenTree::Token(glued_tok), *spacing);
// Remove the first token tree from `stream`. (This
// is almost always the only tree in `stream`.)
let stream_vec_mut = Lrc::make_mut(stream_lrc);
stream_vec_mut.remove(0);
// Don't push `stream` if it's empty -- that could
// block subsequent token gluing, by getting
// between two token trees that should be glued
// together.
                        if !stream.is_empty() {
self.0.push(stream);
}
return;
}
}
}
}
self.0.push(stream);
}
pub fn build(self) -> TokenStream {
TokenStream::from_streams(self.0)
}
}
/// By-reference iterator over a [`TokenStream`].
#[derive(Clone)]
pub struct CursorRef<'t> {
stream: &'t TokenStream,
index: usize,
}
impl<'t> CursorRef<'t> {
fn next_with_spacing(&mut self) -> Option<&'t TreeAndSpacing> {
self.stream.0.get(self.index).map(|tree| {
self.index += 1;
tree
})
}
}
impl<'t> Iterator for CursorRef<'t> {
type Item = &'t TokenTree;
fn next(&mut self) -> Option<&'t TokenTree> {
self.next_with_spacing().map(|(tree, _)| tree)
}
}
/// Owning by-value iterator over a [`TokenStream`].
// FIXME: Many uses of this can be replaced with by-reference iterator to avoid clones.
#[derive(Clone)]
pub struct Cursor {
pub stream: TokenStream,
index: usize,
}
impl Iterator for Cursor {
type Item = TokenTree;
fn next(&mut self) -> Option<TokenTree> {
|
self.next_with_spacing().map(|(tree, _)| tree)
}
|
random_line_split
|
|
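A standalone toy analogue of the token "gluing" that TokenStreamBuilder::push describes above; it is not the rustc API, just the same idea replayed on (text, joint) pairs so the overwrite-and-remove steps are easier to follow.
fn glue(a: &str, b: &str) -> Option<String> {
    // A tiny stand-in for Token::glue: only two pairs are recognized here.
    match (a, b) {
        ("=", ">") => Some("=>".to_string()),
        ("&", "&") => Some("&&".to_string()),
        _ => None,
    }
}

fn push_stream(acc: &mut Vec<(String, bool)>, mut stream: Vec<(String, bool)>) {
    // If the last accumulated token is marked joint and can be glued with the
    // first incoming token, overwrite the last one and drop the incoming one,
    // mirroring the make_mut()/remove(0) steps in the real builder.
    let glued = match (acc.last(), stream.first()) {
        (Some((prev, true)), Some((next, joint))) => glue(prev, next).map(|s| (s, *joint)),
        _ => None,
    };
    if let Some(merged) = glued {
        *acc.last_mut().unwrap() = merged;
        stream.remove(0);
    }
    acc.extend(stream);
}
Pushing [("=".into(), true)] and then [(">".into(), false)] leaves a single "=>" entry, which is the behaviour the comments above describe for Joint tokens.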
tokenstream.rs
|
: Send + Sync,
{
}
impl TokenTree {
/// Checks if this `TokenTree` is equal to the other, regardless of span information.
pub fn eq_unspanned(&self, other: &TokenTree) -> bool {
match (self, other) {
(TokenTree::Token(token), TokenTree::Token(token2)) => token.kind == token2.kind,
(TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
delim == delim2 && tts.eq_unspanned(&tts2)
}
_ => false,
}
}
/// Retrieves the `TokenTree`'s span.
pub fn span(&self) -> Span {
match self {
TokenTree::Token(token) => token.span,
            TokenTree::Delimited(sp, ..) => sp.entire(),
}
}
/// Modify the `TokenTree`'s span in-place.
pub fn set_span(&mut self, span: Span) {
match self {
TokenTree::Token(token) => token.span = span,
            TokenTree::Delimited(dspan, ..) => *dspan = DelimSpan::from_single(span),
}
}
pub fn
|
(kind: TokenKind, span: Span) -> TokenTree {
TokenTree::Token(Token::new(kind, span))
}
/// Returns the opening delimiter as a token tree.
pub fn open_tt(span: DelimSpan, delim: DelimToken) -> TokenTree {
TokenTree::token(token::OpenDelim(delim), span.open)
}
/// Returns the closing delimiter as a token tree.
pub fn close_tt(span: DelimSpan, delim: DelimToken) -> TokenTree {
TokenTree::token(token::CloseDelim(delim), span.close)
}
pub fn uninterpolate(self) -> TokenTree {
match self {
TokenTree::Token(token) => TokenTree::Token(token.uninterpolate().into_owned()),
tt => tt,
}
}
}
impl<CTX> HashStable<CTX> for TokenStream
where
CTX: crate::HashStableContext,
{
fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
for sub_tt in self.trees() {
sub_tt.hash_stable(hcx, hasher);
}
}
}
pub trait CreateTokenStream: sync::Send + sync::Sync {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream;
}
impl CreateTokenStream for AttrAnnotatedTokenStream {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.clone()
}
}
/// A lazy version of [`TokenStream`], which defers creation
/// of an actual `TokenStream` until it is needed.
/// `Box` is here only to reduce the structure size.
#[derive(Clone)]
pub struct LazyTokenStream(Lrc<Box<dyn CreateTokenStream>>);
impl LazyTokenStream {
    pub fn new(inner: impl CreateTokenStream + 'static) -> LazyTokenStream {
LazyTokenStream(Lrc::new(Box::new(inner)))
}
pub fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.0.create_token_stream()
}
}
impl fmt::Debug for LazyTokenStream {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "LazyTokenStream({:?})", self.create_token_stream())
}
}
impl<S: Encoder> Encodable<S> for LazyTokenStream {
fn encode(&self, s: &mut S) -> Result<(), S::Error> {
// Used by AST json printing.
Encodable::encode(&self.create_token_stream(), s)
}
}
impl<D: Decoder> Decodable<D> for LazyTokenStream {
fn decode(_d: &mut D) -> Result<Self, D::Error> {
panic!("Attempted to decode LazyTokenStream");
}
}
impl<CTX> HashStable<CTX> for LazyTokenStream {
fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
panic!("Attempted to compute stable hash for LazyTokenStream");
}
}
/// An `AttrAnnotatedTokenStream` is similar to a `TokenStream`, but with extra
/// information about the tokens for attribute targets. This is used
/// during expansion to perform early cfg-expansion, and to process attributes
/// during proc-macro invocations.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct AttrAnnotatedTokenStream(pub Lrc<Vec<(AttrAnnotatedTokenTree, Spacing)>>);
/// Like `TokenTree`, but for `AttrAnnotatedTokenStream`
#[derive(Clone, Debug, Encodable, Decodable)]
pub enum AttrAnnotatedTokenTree {
Token(Token),
Delimited(DelimSpan, DelimToken, AttrAnnotatedTokenStream),
/// Stores the attributes for an attribute target,
/// along with the tokens for that attribute target.
/// See `AttributesData` for more information
Attributes(AttributesData),
}
impl AttrAnnotatedTokenStream {
pub fn new(tokens: Vec<(AttrAnnotatedTokenTree, Spacing)>) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream(Lrc::new(tokens))
}
    /// Converts this `AttrAnnotatedTokenStream` to a plain `TokenStream`.
/// During conversion, `AttrAnnotatedTokenTree::Attributes` get 'flattened'
/// back to a `TokenStream` of the form `outer_attr attr_target`.
/// If there are inner attributes, they are inserted into the proper
/// place in the attribute target tokens.
pub fn to_tokenstream(&self) -> TokenStream {
let trees: Vec<_> = self
.0
.iter()
.flat_map(|tree| match &tree.0 {
AttrAnnotatedTokenTree::Token(inner) => {
smallvec![(TokenTree::Token(inner.clone()), tree.1)].into_iter()
}
AttrAnnotatedTokenTree::Delimited(span, delim, stream) => smallvec![(
TokenTree::Delimited(*span, *delim, stream.to_tokenstream()),
tree.1,
)]
.into_iter(),
AttrAnnotatedTokenTree::Attributes(data) => {
let mut outer_attrs = Vec::new();
let mut inner_attrs = Vec::new();
for attr in &data.attrs {
match attr.style {
crate::AttrStyle::Outer => {
outer_attrs.push(attr);
}
crate::AttrStyle::Inner => {
inner_attrs.push(attr);
}
}
}
let mut target_tokens: Vec<_> = data
.tokens
.create_token_stream()
.to_tokenstream()
.0
.iter()
.cloned()
.collect();
                    if !inner_attrs.is_empty() {
let mut found = false;
// Check the last two trees (to account for a trailing semi)
for (tree, _) in target_tokens.iter_mut().rev().take(2) {
if let TokenTree::Delimited(span, delim, delim_tokens) = tree {
// Inner attributes are only supported on extern blocks, functions, impls,
// and modules. All of these have their inner attributes placed at
// the beginning of the rightmost outermost braced group:
                                // e.g. fn foo() { #![my_attr] }
//
// Therefore, we can insert them back into the right location
// without needing to do any extra position tracking.
//
// Note: Outline modules are an exception - they can
// have attributes like `#![my_attr]` at the start of a file.
// Support for custom attributes in this position is not
// properly implemented - we always synthesize fake tokens,
// so we never reach this code.
let mut builder = TokenStreamBuilder::new();
for inner_attr in inner_attrs {
builder.push(inner_attr.tokens().to_tokenstream());
}
builder.push(delim_tokens.clone());
*tree = TokenTree::Delimited(*span, *delim, builder.build());
found = true;
break;
}
}
assert!(
found,
"Failed to find trailing delimited group in: {:?}",
target_tokens
);
}
let mut flat: SmallVec<[_; 1]> = SmallVec::new();
for attr in outer_attrs {
// FIXME: Make this more efficient
flat.extend(attr.tokens().to_tokenstream().0.clone().iter().cloned());
}
flat.extend(target_tokens);
flat.into_iter()
}
})
.collect();
TokenStream::new(trees)
}
}
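// Illustrative note (not part of the dataset row above): for an `Attributes`
// node carrying `#[outer]` and `#![inner]` over `mod m { items }`,
// `to_tokenstream` emits the outer attribute tokens first, then the target
// tokens with `#![inner]` spliced back into the front of the `{ ... }` group,
// i.e. the token sequence for `#[outer] mod m { #![inner] items }`.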
/// Stores the tokens for an attribute target, along
/// with its attributes.
///
/// This is constructed during parsing when we need to capture
/// tokens.
///
/// For example, `#[cfg(FALSE)] struct Foo {}` would
/// have an `attrs` field containing the `#[cfg(FALSE)]` attr,
/// and a `tokens` field storing the (unparsed) tokens `struct Foo {}`
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct AttributesData {
/// Attributes, both outer and inner.
/// These are stored in the original order that they were parsed in.
pub attrs: AttrVec,
/// The underlying tokens for the attribute target that `attrs`
/// are applied to
pub tokens: LazyTokenStream,
}
/// A `TokenStream` is an abstract sequence of tokens, organized into [`TokenTree`]s.
///
/// The goal is for procedural macros to work with `TokenStream`s and `TokenTree`s
/// instead of a representation of the abstract syntax tree.
/// Today's `TokenTree`s can still contain AST via `token::Interpolated` for
/// backwards compatibility.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct TokenStream(pub(crate) Lrc<Vec<TreeAndSpacing>>);
pub type TreeAndSpacing = (TokenTree, Spacing);
// `TokenStream` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(TokenStream, 8);
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable)]
pub enum Spacing {
Alone,
Joint,
}
impl TokenStream {
/// Given a `TokenStream` with a `Stream` of only two arguments, return a new `TokenStream`
/// separating the two arguments with a comma for diagnostic suggestions.
pub fn add_comma(&self) -> Option<(TokenStream, Span)> {
// Used to suggest if a user writes `foo!(a b);`
let mut suggestion = None;
let mut iter = self.0.iter().enumerate().peekable();
while let Some((pos, ts)) = iter.next() {
if let Some((_, next)) = iter.peek() {
let sp = match (&ts, &next) {
(_, (TokenTree::Token(Token { kind: token::Comma,.. }), _)) => continue,
(
(TokenTree::Token(token_left), Spacing::Alone),
(TokenTree::Token(token_right), _),
                    ) if ((token_left.is_ident() && !token_left.is_reserved_ident())
                        || token_left.is_lit())
                        && ((token_right.is_ident() && !token_right.is_reserved_ident())
|| token_right.is_lit()) =>
{
token_left.span
}
((TokenTree::Delimited(sp,..), Spacing::Alone), _) => sp.entire(),
_ => continue,
};
let sp = sp.shrink_to_hi();
let comma = (TokenTree::token(token::Comma, sp), Spacing::Alone);
suggestion = Some((pos, comma, sp));
}
}
if let Some((pos, comma, sp)) = suggestion {
let mut new_stream = Vec::with_capacity(self.0.len() + 1);
let parts = self.0.split_at(pos + 1);
new_stream.extend_from_slice(parts.0);
new_stream.push(comma);
new_stream.extend_from_slice(parts.1);
return Some((TokenStream::new(new_stream), sp));
}
None
}
}
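// Illustrative note (not part of the dataset row above): `add_comma` backs the
// diagnostic for a call such as `foo!(a b);` -- given the stream for `a b`, it
// returns the stream for `a, b` together with the span just after `a`, so the
// parser can suggest writing `foo!(a, b);`.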
impl From<(AttrAnnotatedTokenTree, Spacing)> for AttrAnnotatedTokenStream {
fn from((tree, spacing): (AttrAnnotatedTokenTree, Spacing)) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream::new(vec![(tree, spacing)])
}
}
impl From<TokenTree> for TokenStream {
fn from(tree: TokenTree) -> TokenStream {
TokenStream::new(vec![(tree, Spacing::Alone)])
}
}
impl From<TokenTree> for TreeAndSpacing {
fn from(tree: TokenTree) -> TreeAndSpacing {
(tree, Spacing::Alone)
}
}
impl iter::FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(iter: I) -> Self {
TokenStream::new(iter.into_iter().map(Into::into).collect::<Vec<TreeAndSpacing>>())
}
}
impl Eq for TokenStream {}
impl PartialEq<TokenStream> for TokenStream {
fn eq(&self, other: &TokenStream) -> bool {
self.trees().eq(other.trees())
}
}
impl TokenStream {
pub fn new(streams: Vec<TreeAndSpacing>) -> TokenStream {
TokenStream(Lrc::new(streams))
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn from_streams(mut streams: SmallVec<[TokenStream; 2]>) -> TokenStream {
match streams.len() {
0 => TokenStream::default(),
1 => streams.pop().unwrap(),
_ => {
// We are going to extend the first stream in `streams` with
// the elements from the subsequent streams. This requires
// using `make_mut()` on the first stream, and in practice this
// doesn't cause cloning 99.9% of the time.
//
// One very common use case is when `streams` has two elements,
// where the first stream has any number of elements within
// (often 1, but sometimes many more) and the second stream has
// a single element within.
// Determine how much the first stream will be extended.
// Needed to avoid quadratic blow up from on-the-fly
// reallocations (#57735).
let num_appends = streams.iter().skip(1).map(|ts| ts.len()).sum();
// Get the first stream. If it's `None`, create an empty
// stream.
let mut iter = streams.drain(..);
let mut first_stream_lrc = iter.next().unwrap().0;
// Append the elements to the first stream, after reserving
// space for them.
let first_vec_mut = Lrc::make_mut(&mut first_stream_lrc);
first_vec_mut.reserve(num_appends);
for stream in iter {
first_vec_mut.extend(stream.0.iter().cloned());
}
// Create the final `TokenStream`.
TokenStream(first_stream_lrc)
}
}
}
pub fn trees(&self) -> Cursor {
self.clone().into_trees()
}
pub fn into_trees(self) -> Cursor {
Cursor::new(self)
}
/// Compares two `TokenStream`s, checking equality without regarding span information.
pub fn eq_unspanned(&self, other: &TokenStream) -> bool {
let mut t1 = self.trees();
let mut t2 = other.trees();
for (t1, t2) in iter::zip(&mut t1, &mut t2) {
            if !t1.eq_unspanned(&t2) {
return false;
}
}
t1.next().is_none() && t2.next().is_none()
}
pub fn map_enumerated<F: FnMut(usize, &TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
TokenStream(Lrc::new(
self.0
.iter()
.enumerate()
.map(|(i, (tree, is_joint))| (f(i, tree), *is_joint))
.collect(),
))
}
}
// 99.5%+ of the time we have 1 or 2 elements in this vector.
#[derive(Clone)]
pub struct TokenStreamBuilder(SmallVec<[TokenStream; 2]>);
impl TokenStreamBuilder {
pub fn new() -> TokenStreamBuilder {
TokenStreamBuilder(SmallVec::new())
}
pub fn push<T: Into<TokenStream>>(&mut self, stream: T) {
let mut stream = stream.into();
// If `self` is not empty and the last tree within the last stream is a
// token tree marked with `Joint`...
if let Some(TokenStream(ref mut last_stream_lrc)) = self.0.last_mut() {
if let Some((TokenTree::Token(last_token), Spacing::Joint)) = last_stream_lrc.last() {
//...and `stream` is not empty and the first tree within it is
// a token tree...
let TokenStream(ref mut stream_lrc) = stream;
if let Some((TokenTree::Token(token), spacing)) = stream_lrc.first() {
//...and the two tokens can be glued together...
if let Some(glued_tok) = last_token.glue(&token) {
//...then do so, by overwriting the last token
// tree in `self` and removing the first token tree
// from `stream`. This requires using `make_mut()`
// on the last stream in `self` and on `stream`,
// and in practice this doesn't cause cloning 99.9%
// of the time.
// Overwrite the last token tree with the merged
// token.
let last_vec_mut = Lrc::make_mut(last_stream_lrc);
*last_vec_mut.last_mut().unwrap() = (TokenTree::Token(glued
|
token
|
identifier_name
|
kind.rs
|
b: &Block, s: Span, n: NodeId, _: ()) {
check_fn(self, fk, fd, b, s, n);
}
fn visit_ty(&mut self, t: &Ty, _: ()) {
check_ty(self, t);
}
fn visit_item(&mut self, i: &Item, _: ()) {
check_item(self, i);
}
}
pub fn check_crate(tcx: &ty::ctxt,
method_map: typeck::MethodMap,
krate: &Crate) {
let mut ctx = Context {
tcx: tcx,
method_map: method_map,
};
visit::walk_crate(&mut ctx, krate, ());
tcx.sess.abort_if_errors();
}
fn check_struct_safe_for_destructor(cx: &mut Context,
span: Span,
struct_did: DefId) {
let struct_tpt = ty::lookup_item_type(cx.tcx, struct_did);
    if !struct_tpt.generics.has_type_params() {
        let struct_ty = ty::mk_struct(cx.tcx, struct_did, ty::substs {
            regions: ty::NonerasedRegions(OwnedSlice::empty()),
            self_ty: None,
            tps: Vec::new()
        });
        if !ty::type_is_sendable(cx.tcx, struct_ty) {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a \
structure that does not satisfy Send");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
} else {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a structure \
with type parameters");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
}
fn check_impl_of_trait(cx: &mut Context, it: &Item, trait_ref: &TraitRef, self_type: &Ty) {
let ast_trait_def = *cx.tcx.def_map.borrow()
.find(&trait_ref.ref_id)
.expect("trait ref not in def map!");
let trait_def_id = ast_util::def_id_of_def(ast_trait_def);
let trait_def = *cx.tcx.trait_defs.borrow()
.find(&trait_def_id)
.expect("trait def not in trait-defs map!");
// If this trait has builtin-kind supertraits, meet them.
let self_ty: ty::t = ty::node_id_to_type(cx.tcx, it.id);
debug!("checking impl with self type {:?}", ty::get(self_ty).sty);
check_builtin_bounds(cx, self_ty, trait_def.bounds, |missing| {
cx.tcx.sess.span_err(self_type.span,
format!("the type `{}', which does not fulfill `{}`, cannot implement this \
trait", ty_to_str(cx.tcx, self_ty), missing.user_string(cx.tcx)));
cx.tcx.sess.span_note(self_type.span,
format!("types implementing this trait must fulfill `{}`",
trait_def.bounds.user_string(cx.tcx)));
});
// If this is a destructor, check kinds.
if cx.tcx.lang_items.drop_trait() == Some(trait_def_id) {
match self_type.node {
TyPath(_, ref bounds, path_node_id) => {
assert!(bounds.is_none());
let struct_def = cx.tcx.def_map.borrow().get_copy(&path_node_id);
let struct_did = ast_util::def_id_of_def(struct_def);
check_struct_safe_for_destructor(cx, self_type.span, struct_did);
}
_ => {
cx.tcx.sess.span_bug(self_type.span,
"the self type for the Drop trait impl is not a path");
}
}
}
}
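// Hedged illustration in modern syntax (hypothetical names, not part of the
// dataset row above) of the shape this pass rejected: a `Drop` impl whose self
// type has type parameters required `#[unsafe_destructor]` in this era.
struct Wrapper<T> { value: T }
impl<T> Drop for Wrapper<T> {
    fn drop(&mut self) {
        // In the era of this pass, this impl was rejected unless the item
        // carried `#[unsafe_destructor]`; modern Rust allows it directly.
    }
}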
fn check_item(cx: &mut Context, item: &Item) {
    if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
ItemImpl(_, Some(ref trait_ref), self_type, _) => {
check_impl_of_trait(cx, item, trait_ref, self_type);
}
_ => {}
}
}
visit::walk_item(cx, item, ());
}
// Yields the appropriate function to check the kind of closed over
// variables. `id` is the NodeId for some expression that creates the
// closure.
fn with_appropriate_checker(cx: &Context,
id: NodeId,
b: |checker: |&Context, @freevar_entry||) {
fn check_for_uniq(cx: &Context, fv: &freevar_entry, bounds: ty::BuiltinBounds) {
// all captured data must be owned, regardless of whether it is
// moved in or copied in.
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
check_freevar_bounds(cx, fv.span, var_t, bounds, None);
}
fn check_for_block(cx: &Context, fv: &freevar_entry,
bounds: ty::BuiltinBounds, region: ty::Region) {
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
// FIXME(#3569): Figure out whether the implicit borrow is actually
// mutable. Currently we assume all upvars are referenced mutably.
let implicit_borrowed_type = ty::mk_mut_rptr(cx.tcx, region, var_t);
check_freevar_bounds(cx, fv.span, implicit_borrowed_type,
bounds, Some(var_t));
}
fn check_for_bare(cx: &Context, fv: @freevar_entry) {
cx.tcx.sess.span_err(
fv.span,
"can't capture dynamic environment in a fn item; \
            use the || { ... } closure form instead");
} // same check is done in resolve.rs, but shouldn't be done
let fty = ty::node_id_to_type(cx.tcx, id);
match ty::get(fty).sty {
ty::ty_closure(~ty::ClosureTy {
sigil: OwnedSigil,
bounds: bounds,
..
}) => {
b(|cx, fv| check_for_uniq(cx, fv, bounds))
}
ty::ty_closure(~ty::ClosureTy {
sigil: ManagedSigil,
..
}) => {
// can't happen
fail!("internal error: saw closure with managed sigil (@fn)");
}
ty::ty_closure(~ty::ClosureTy {
sigil: BorrowedSigil,
bounds: bounds,
region: region,
..
}) => {
b(|cx, fv| check_for_block(cx, fv, bounds, region))
}
ty::ty_bare_fn(_) => {
b(check_for_bare)
}
ref s => {
cx.tcx.sess.bug(
format!("expect fn type in kind checker, not {:?}", s));
}
}
}
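// Hedged illustration (modern syntax, hypothetical names) of the error issued
// by `check_for_bare` above: a nested `fn` item has no environment to capture,
// while the `|| { ... }` closure form does.
fn capture_demo() -> i32 {
    let x = 1;
    // fn inner() -> i32 { x }  // error: can't capture dynamic environment in a fn item
    let inner = || x;           // ok: the closure captures `x`
    inner()
}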
// Check that the free variables used in a shared/sendable closure conform
// to the copy/move kind bounds. Then recursively check the function body.
fn check_fn(
cx: &mut Context,
fk: &visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
fn_id: NodeId) {
// Check kinds on free variables:
with_appropriate_checker(cx, fn_id, |chk| {
let r = freevars::get_freevars(cx.tcx, fn_id);
for fv in r.iter() {
chk(cx, *fv);
}
});
visit::walk_fn(cx, fk, decl, body, sp, fn_id, ());
}
pub fn check_expr(cx: &mut Context, e: &Expr) {
debug!("kind::check_expr({})", expr_to_str(e));
// Handle any kind bounds on type parameters
{
let method_map = cx.method_map.borrow();
let method = method_map.find(&typeck::MethodCall::expr(e.id));
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = match method {
Some(method) => Some(&method.substs.tps),
None => node_type_substs.find(&e.id)
};
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let type_param_defs = match e.node {
ExprPath(_) => {
let did = ast_util::def_id_of_def(def_map.get_copy(&e.id));
ty::lookup_item_type(cx.tcx, did).generics.type_param_defs.clone()
}
_ => {
// Type substitutions should only occur on paths and
// method calls, so this needs to be a method call.
// Even though the callee_id may have been the id with
// node_type_substs, e.id is correct here.
match method {
Some(method) => {
ty::method_call_type_param_defs(cx.tcx, method.origin)
}
None => {
cx.tcx.sess.span_bug(e.span,
"non path/method call expr has type substs??");
}
}
}
};
            if ts.len() != type_param_defs.len() {
// Fail earlier to make debugging easier
fail!("internal error: in kind::check_expr, length \
mismatch between actual and declared bounds: actual = \
{}, declared = {}",
ts.repr(cx.tcx),
type_param_defs.repr(cx.tcx));
}
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, e.span, ty, type_param_def)
}
}
}
match e.node {
ExprUnary(UnBox, interior) => {
let interior_type = ty::expr_ty(cx.tcx, interior);
let _ = check_static(cx.tcx, interior_type, interior.span);
}
ExprCast(source, _) => {
let source_ty = ty::expr_ty(cx.tcx, source);
let target_ty = ty::expr_ty(cx.tcx, e);
check_trait_cast(cx, source_ty, target_ty, source.span);
}
ExprRepeat(element, count_expr) => {
let count = ty::eval_repeat_count(cx.tcx, count_expr);
if count > 1 {
let element_ty = ty::expr_ty(cx.tcx, element);
check_copy(cx, element_ty, element.span,
"repeated element will be copied");
}
}
_ => {}
}
// Search for auto-adjustments to find trait coercions.
match cx.tcx.adjustments.borrow().find(&e.id) {
Some(adjustment) => {
match **adjustment {
ty::AutoObject(..) => {
let source_ty = ty::expr_ty(cx.tcx, e);
let target_ty = ty::expr_ty_adjusted(cx.tcx, e,
&*cx.method_map.borrow());
check_trait_cast(cx, source_ty, target_ty, e.span);
}
ty::AutoAddEnv(..) |
ty::AutoDerefRef(..) => {}
}
}
None => {}
}
visit::walk_expr(cx, e, ());
}
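// Hedged illustration (modern syntax, hypothetical names) of the `ExprRepeat`
// branch above: a repeat expression with count > 1 copies its element, which
// `check_copy` rejects for types that move by default.
fn repeat_demo() {
    let _zeros = [0u8; 4];                            // ok: `u8` is copyable
    // let _names = [String::new(); 4];               // error: repeated element will be copied
    let _names: Vec<String> = vec![String::new(); 4]; // ok: `vec!` clones instead
}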
fn check_trait_cast(cx: &mut Context, source_ty: ty::t, target_ty: ty::t, span: Span) {
check_cast_for_escaping_regions(cx, source_ty, target_ty, span);
match ty::get(target_ty).sty {
ty::ty_trait(~ty::TyTrait { bounds,.. }) => {
check_trait_cast_bounds(cx, span, source_ty, bounds);
}
_ => {}
}
}
fn check_ty(cx: &mut Context, aty: &Ty) {
match aty.node {
TyPath(_, _, id) => {
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = node_type_substs.find(&id);
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let did = ast_util::def_id_of_def(def_map.get_copy(&id));
let generics = ty::lookup_item_type(cx.tcx, did).generics;
let type_param_defs = generics.type_param_defs();
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, aty.span, ty, type_param_def)
}
}
}
_ => {}
}
visit::walk_ty(cx, aty, ());
}
// Calls "any_missing" if any bounds were missing.
pub fn check_builtin_bounds(cx: &Context,
ty: ty::t,
bounds: ty::BuiltinBounds,
any_missing: |ty::BuiltinBounds|) {
let kind = ty::type_contents(cx.tcx, ty);
let mut missing = ty::EmptyBuiltinBounds();
for bound in bounds.iter() {
        if !kind.meets_bound(cx.tcx, bound) {
            missing.add(bound);
        }
    }
    if !missing.is_empty() {
any_missing(missing);
}
}
pub fn check_typaram_bounds(cx: &Context,
sp: Span,
ty: ty::t,
type_param_def: &ty::TypeParameterDef) {
check_builtin_bounds(cx,
ty,
type_param_def.bounds.builtin_bounds,
|missing| {
cx.tcx.sess.span_err(
sp,
format!("instantiating a type parameter with an incompatible type \
`{}`, which does not fulfill `{}`",
ty_to_str(cx.tcx, ty),
missing.user_string(cx.tcx)));
});
}
pub fn check_freevar_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds, referenced_ty: Option<ty::t>)
{
check_builtin_bounds(cx, ty, bounds, |missing| {
// Will be Some if the freevar is implicitly borrowed (stack closure).
// Emit a less mysterious error message in this case.
match referenced_ty {
Some(rty) => cx.tcx.sess.span_err(sp,
format!("cannot implicitly borrow variable of type `{}` in a bounded \
stack closure (implicit reference does not fulfill `{}`)",
ty_to_str(cx.tcx, rty), missing.user_string(cx.tcx))),
None => cx.tcx.sess.span_err(sp,
format!("cannot capture variable of type `{}`, which does \
not fulfill `{}`, in a bounded closure",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx))),
}
cx.tcx.sess.span_note(
sp,
format!("this closure's environment must satisfy `{}`",
bounds.user_string(cx.tcx)));
});
}
pub fn check_trait_cast_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds) {
check_builtin_bounds(cx, ty, bounds, |missing| {
cx.tcx.sess.span_err(sp,
format!("cannot pack type `{}`, which does not fulfill \
`{}`, as a trait bounded by {}",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx),
bounds.user_string(cx.tcx)));
});
}
fn check_copy(cx: &Context, ty: ty::t, sp: Span, reason: &str) {
debug!("type_contents({})={}",
ty_to_str(cx.tcx, ty),
ty::type_contents(cx.tcx, ty).to_str());
if ty::type_moves_by_default(cx.tcx, ty) {
cx.tcx.sess.span_err(
sp, format!("copying a value of non-copyable type `{}`",
ty_to_str(cx.tcx, ty)));
cx.tcx.sess.span_note(sp, format!("{}", reason));
}
}
pub fn check_static(tcx: &ty::ctxt, ty: ty::t, sp: Span) -> bool
|
/// This is rather subtle. When we are casting a value to an instantiated
/// trait like `a as trait<'r>`, regionck already ensures that any references
/// that appear in the type of `a` are bounded by `'r` (ed.: rem
/// FIXME(#5723)). However, it is possible that there are *type parameters*
/// in the type of `a`, and those *type parameters* may have references
/// within them. We have to guarantee that the regions which appear in those
/// type parameters are not obscured.
///
/// Therefore, we ensure that one of three conditions holds:
///
/// (1) The trait instance cannot escape the current fn. This is
/// guaranteed if the region bound `&r` is some scope within the fn
/// itself. This case is safe because whatever references are
/// found within the type parameter, they must enclose the fn body
|
{
if !ty::type_is_static(tcx, ty) {
match ty::get(ty).sty {
ty::ty_param(..) => {
tcx.sess.span_err(sp,
format!("value may contain references; \
add `'static` bound to `{}`", ty_to_str(tcx, ty)));
}
_ => {
tcx.sess.span_err(sp, "value may contain references");
}
}
false
} else {
true
}
}
|
identifier_body
|
kind.rs
|
b: &Block, s: Span, n: NodeId, _: ()) {
check_fn(self, fk, fd, b, s, n);
}
fn visit_ty(&mut self, t: &Ty, _: ()) {
check_ty(self, t);
}
fn visit_item(&mut self, i: &Item, _: ()) {
check_item(self, i);
}
}
pub fn check_crate(tcx: &ty::ctxt,
method_map: typeck::MethodMap,
krate: &Crate) {
let mut ctx = Context {
tcx: tcx,
method_map: method_map,
};
visit::walk_crate(&mut ctx, krate, ());
tcx.sess.abort_if_errors();
}
fn check_struct_safe_for_destructor(cx: &mut Context,
span: Span,
struct_did: DefId) {
let struct_tpt = ty::lookup_item_type(cx.tcx, struct_did);
    if !struct_tpt.generics.has_type_params() {
        let struct_ty = ty::mk_struct(cx.tcx, struct_did, ty::substs {
            regions: ty::NonerasedRegions(OwnedSlice::empty()),
            self_ty: None,
            tps: Vec::new()
        });
        if !ty::type_is_sendable(cx.tcx, struct_ty) {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a \
structure that does not satisfy Send");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
} else {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a structure \
with type parameters");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
}
fn check_impl_of_trait(cx: &mut Context, it: &Item, trait_ref: &TraitRef, self_type: &Ty) {
let ast_trait_def = *cx.tcx.def_map.borrow()
.find(&trait_ref.ref_id)
.expect("trait ref not in def map!");
let trait_def_id = ast_util::def_id_of_def(ast_trait_def);
let trait_def = *cx.tcx.trait_defs.borrow()
.find(&trait_def_id)
.expect("trait def not in trait-defs map!");
// If this trait has builtin-kind supertraits, meet them.
let self_ty: ty::t = ty::node_id_to_type(cx.tcx, it.id);
debug!("checking impl with self type {:?}", ty::get(self_ty).sty);
check_builtin_bounds(cx, self_ty, trait_def.bounds, |missing| {
cx.tcx.sess.span_err(self_type.span,
format!("the type `{}', which does not fulfill `{}`, cannot implement this \
trait", ty_to_str(cx.tcx, self_ty), missing.user_string(cx.tcx)));
cx.tcx.sess.span_note(self_type.span,
format!("types implementing this trait must fulfill `{}`",
trait_def.bounds.user_string(cx.tcx)));
});
// If this is a destructor, check kinds.
if cx.tcx.lang_items.drop_trait() == Some(trait_def_id) {
match self_type.node {
TyPath(_, ref bounds, path_node_id) => {
assert!(bounds.is_none());
let struct_def = cx.tcx.def_map.borrow().get_copy(&path_node_id);
let struct_did = ast_util::def_id_of_def(struct_def);
check_struct_safe_for_destructor(cx, self_type.span, struct_did);
}
_ => {
cx.tcx.sess.span_bug(self_type.span,
"the self type for the Drop trait impl is not a path");
}
}
}
}
fn check_item(cx: &mut Context, item: &Item) {
    if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
ItemImpl(_, Some(ref trait_ref), self_type, _) => {
check_impl_of_trait(cx, item, trait_ref, self_type);
}
_ => {}
}
}
visit::walk_item(cx, item, ());
}
// Yields the appropriate function to check the kind of closed over
// variables. `id` is the NodeId for some expression that creates the
// closure.
fn with_appropriate_checker(cx: &Context,
id: NodeId,
b: |checker: |&Context, @freevar_entry||) {
fn check_for_uniq(cx: &Context, fv: &freevar_entry, bounds: ty::BuiltinBounds) {
// all captured data must be owned, regardless of whether it is
// moved in or copied in.
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
check_freevar_bounds(cx, fv.span, var_t, bounds, None);
}
fn check_for_block(cx: &Context, fv: &freevar_entry,
bounds: ty::BuiltinBounds, region: ty::Region) {
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
// FIXME(#3569): Figure out whether the implicit borrow is actually
// mutable. Currently we assume all upvars are referenced mutably.
let implicit_borrowed_type = ty::mk_mut_rptr(cx.tcx, region, var_t);
check_freevar_bounds(cx, fv.span, implicit_borrowed_type,
bounds, Some(var_t));
}
fn check_for_bare(cx: &Context, fv: @freevar_entry) {
cx.tcx.sess.span_err(
fv.span,
"can't capture dynamic environment in a fn item; \
            use the || { ... } closure form instead");
} // same check is done in resolve.rs, but shouldn't be done
let fty = ty::node_id_to_type(cx.tcx, id);
match ty::get(fty).sty {
ty::ty_closure(~ty::ClosureTy {
sigil: OwnedSigil,
bounds: bounds,
..
}) => {
b(|cx, fv| check_for_uniq(cx, fv, bounds))
}
ty::ty_closure(~ty::ClosureTy {
sigil: ManagedSigil,
..
}) => {
// can't happen
fail!("internal error: saw closure with managed sigil (@fn)");
}
ty::ty_closure(~ty::ClosureTy {
sigil: BorrowedSigil,
bounds: bounds,
region: region,
..
}) => {
b(|cx, fv| check_for_block(cx, fv, bounds, region))
}
ty::ty_bare_fn(_) => {
b(check_for_bare)
}
ref s => {
cx.tcx.sess.bug(
format!("expect fn type in kind checker, not {:?}", s));
}
}
}
// Check that the free variables used in a shared/sendable closure conform
// to the copy/move kind bounds. Then recursively check the function body.
fn check_fn(
cx: &mut Context,
fk: &visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
fn_id: NodeId) {
// Check kinds on free variables:
with_appropriate_checker(cx, fn_id, |chk| {
let r = freevars::get_freevars(cx.tcx, fn_id);
for fv in r.iter() {
chk(cx, *fv);
}
});
visit::walk_fn(cx, fk, decl, body, sp, fn_id, ());
}
pub fn check_expr(cx: &mut Context, e: &Expr) {
debug!("kind::check_expr({})", expr_to_str(e));
// Handle any kind bounds on type parameters
{
let method_map = cx.method_map.borrow();
let method = method_map.find(&typeck::MethodCall::expr(e.id));
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = match method {
Some(method) => Some(&method.substs.tps),
None => node_type_substs.find(&e.id)
};
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let type_param_defs = match e.node {
ExprPath(_) => {
let did = ast_util::def_id_of_def(def_map.get_copy(&e.id));
ty::lookup_item_type(cx.tcx, did).generics.type_param_defs.clone()
}
_ => {
// Type substitutions should only occur on paths and
// method calls, so this needs to be a method call.
// Even though the callee_id may have been the id with
// node_type_substs, e.id is correct here.
match method {
Some(method) => {
ty::method_call_type_param_defs(cx.tcx, method.origin)
}
None => {
cx.tcx.sess.span_bug(e.span,
"non path/method call expr has type substs??");
}
}
}
};
            if ts.len() != type_param_defs.len() {
// Fail earlier to make debugging easier
fail!("internal error: in kind::check_expr, length \
mismatch between actual and declared bounds: actual = \
{}, declared = {}",
ts.repr(cx.tcx),
type_param_defs.repr(cx.tcx));
}
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, e.span, ty, type_param_def)
}
}
}
match e.node {
ExprUnary(UnBox, interior) => {
let interior_type = ty::expr_ty(cx.tcx, interior);
let _ = check_static(cx.tcx, interior_type, interior.span);
}
ExprCast(source, _) => {
let source_ty = ty::expr_ty(cx.tcx, source);
let target_ty = ty::expr_ty(cx.tcx, e);
check_trait_cast(cx, source_ty, target_ty, source.span);
}
ExprRepeat(element, count_expr) => {
let count = ty::eval_repeat_count(cx.tcx, count_expr);
if count > 1 {
let element_ty = ty::expr_ty(cx.tcx, element);
check_copy(cx, element_ty, element.span,
"repeated element will be copied");
}
}
_ => {}
}
// Search for auto-adjustments to find trait coercions.
match cx.tcx.adjustments.borrow().find(&e.id) {
Some(adjustment) => {
match **adjustment {
ty::AutoObject(..) => {
let source_ty = ty::expr_ty(cx.tcx, e);
let target_ty = ty::expr_ty_adjusted(cx.tcx, e,
&*cx.method_map.borrow());
check_trait_cast(cx, source_ty, target_ty, e.span);
}
ty::AutoAddEnv(..) |
ty::AutoDerefRef(..) => {}
}
}
None => {}
}
visit::walk_expr(cx, e, ());
}
fn check_trait_cast(cx: &mut Context, source_ty: ty::t, target_ty: ty::t, span: Span) {
check_cast_for_escaping_regions(cx, source_ty, target_ty, span);
match ty::get(target_ty).sty {
ty::ty_trait(~ty::TyTrait { bounds,.. }) => {
check_trait_cast_bounds(cx, span, source_ty, bounds);
}
_ => {}
}
}
fn check_ty(cx: &mut Context, aty: &Ty) {
match aty.node {
TyPath(_, _, id) => {
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = node_type_substs.find(&id);
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let did = ast_util::def_id_of_def(def_map.get_copy(&id));
let generics = ty::lookup_item_type(cx.tcx, did).generics;
let type_param_defs = generics.type_param_defs();
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, aty.span, ty, type_param_def)
}
}
}
_ => {}
}
visit::walk_ty(cx, aty, ());
}
// Calls "any_missing" if any bounds were missing.
pub fn check_builtin_bounds(cx: &Context,
ty: ty::t,
bounds: ty::BuiltinBounds,
any_missing: |ty::BuiltinBounds|) {
let kind = ty::type_contents(cx.tcx, ty);
let mut missing = ty::EmptyBuiltinBounds();
for bound in bounds.iter() {
        if !kind.meets_bound(cx.tcx, bound) {
            missing.add(bound);
        }
    }
    if !missing.is_empty() {
any_missing(missing);
}
}
pub fn
|
(cx: &Context,
sp: Span,
ty: ty::t,
type_param_def: &ty::TypeParameterDef) {
check_builtin_bounds(cx,
ty,
type_param_def.bounds.builtin_bounds,
|missing| {
cx.tcx.sess.span_err(
sp,
format!("instantiating a type parameter with an incompatible type \
`{}`, which does not fulfill `{}`",
ty_to_str(cx.tcx, ty),
missing.user_string(cx.tcx)));
});
}
pub fn check_freevar_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds, referenced_ty: Option<ty::t>)
{
check_builtin_bounds(cx, ty, bounds, |missing| {
// Will be Some if the freevar is implicitly borrowed (stack closure).
// Emit a less mysterious error message in this case.
match referenced_ty {
Some(rty) => cx.tcx.sess.span_err(sp,
format!("cannot implicitly borrow variable of type `{}` in a bounded \
stack closure (implicit reference does not fulfill `{}`)",
ty_to_str(cx.tcx, rty), missing.user_string(cx.tcx))),
None => cx.tcx.sess.span_err(sp,
format!("cannot capture variable of type `{}`, which does \
not fulfill `{}`, in a bounded closure",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx))),
}
cx.tcx.sess.span_note(
sp,
format!("this closure's environment must satisfy `{}`",
bounds.user_string(cx.tcx)));
});
}
pub fn check_trait_cast_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds) {
check_builtin_bounds(cx, ty, bounds, |missing| {
cx.tcx.sess.span_err(sp,
format!("cannot pack type `{}`, which does not fulfill \
`{}`, as a trait bounded by {}",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx),
bounds.user_string(cx.tcx)));
});
}
fn check_copy(cx: &Context, ty: ty::t, sp: Span, reason: &str) {
debug!("type_contents({})={}",
ty_to_str(cx.tcx, ty),
ty::type_contents(cx.tcx, ty).to_str());
if ty::type_moves_by_default(cx.tcx, ty) {
cx.tcx.sess.span_err(
sp, format!("copying a value of non-copyable type `{}`",
ty_to_str(cx.tcx, ty)));
cx.tcx.sess.span_note(sp, format!("{}", reason));
}
}
pub fn check_static(tcx: &ty::ctxt, ty: ty::t, sp: Span) -> bool {
    if !ty::type_is_static(tcx, ty) {
match ty::get(ty).sty {
ty::ty_param(..) => {
tcx.sess.span_err(sp,
format!("value may contain references; \
add `'static` bound to `{}`", ty_to_str(tcx, ty)));
}
_ => {
tcx.sess.span_err(sp, "value may contain references");
}
}
false
} else {
true
}
}
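// Hedged illustration (modern syntax, hypothetical names) of the rule enforced
// by `check_static` above: a bare type parameter may contain references, so it
// is rejected where a `'static` value is required unless the bound is added.
fn keep_forever<T: 'static>(value: T) -> Box<dyn std::any::Any> {
    // Without `T: 'static` this fails: the value may contain references.
    Box::new(value)
}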
/// This is rather subtle. When we are casting a value to an instantiated
/// trait like `a as trait<'r>`, regionck already ensures that any references
/// that appear in the type of `a` are bounded by `'r` (ed.: rem
/// FIXME(#5723)). However, it is possible that there are *type parameters*
/// in the type of `a`, and those *type parameters* may have references
/// within them. We have to guarantee that the regions which appear in those
/// type parameters are not obscured.
///
/// Therefore, we ensure that one of three conditions holds:
///
/// (1) The trait instance cannot escape the current fn. This is
/// guaranteed if the region bound `&r` is some scope within the fn
/// itself. This case is safe because whatever references are
/// found within the type parameter, they must enclose the fn body
|
check_typaram_bounds
|
identifier_name
|
kind.rs
|
b: &Block, s: Span, n: NodeId, _: ()) {
check_fn(self, fk, fd, b, s, n);
}
fn visit_ty(&mut self, t: &Ty, _: ()) {
check_ty(self, t);
}
fn visit_item(&mut self, i: &Item, _: ()) {
check_item(self, i);
}
}
pub fn check_crate(tcx: &ty::ctxt,
method_map: typeck::MethodMap,
krate: &Crate) {
let mut ctx = Context {
tcx: tcx,
method_map: method_map,
};
visit::walk_crate(&mut ctx, krate, ());
tcx.sess.abort_if_errors();
}
fn check_struct_safe_for_destructor(cx: &mut Context,
span: Span,
struct_did: DefId) {
let struct_tpt = ty::lookup_item_type(cx.tcx, struct_did);
    if !struct_tpt.generics.has_type_params() {
        let struct_ty = ty::mk_struct(cx.tcx, struct_did, ty::substs {
            regions: ty::NonerasedRegions(OwnedSlice::empty()),
            self_ty: None,
            tps: Vec::new()
        });
        if !ty::type_is_sendable(cx.tcx, struct_ty) {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a \
structure that does not satisfy Send");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
} else {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a structure \
with type parameters");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
}
fn check_impl_of_trait(cx: &mut Context, it: &Item, trait_ref: &TraitRef, self_type: &Ty) {
let ast_trait_def = *cx.tcx.def_map.borrow()
.find(&trait_ref.ref_id)
.expect("trait ref not in def map!");
let trait_def_id = ast_util::def_id_of_def(ast_trait_def);
let trait_def = *cx.tcx.trait_defs.borrow()
.find(&trait_def_id)
.expect("trait def not in trait-defs map!");
// If this trait has builtin-kind supertraits, meet them.
let self_ty: ty::t = ty::node_id_to_type(cx.tcx, it.id);
debug!("checking impl with self type {:?}", ty::get(self_ty).sty);
check_builtin_bounds(cx, self_ty, trait_def.bounds, |missing| {
cx.tcx.sess.span_err(self_type.span,
format!("the type `{}', which does not fulfill `{}`, cannot implement this \
trait", ty_to_str(cx.tcx, self_ty), missing.user_string(cx.tcx)));
cx.tcx.sess.span_note(self_type.span,
format!("types implementing this trait must fulfill `{}`",
trait_def.bounds.user_string(cx.tcx)));
});
// If this is a destructor, check kinds.
if cx.tcx.lang_items.drop_trait() == Some(trait_def_id) {
match self_type.node {
TyPath(_, ref bounds, path_node_id) => {
assert!(bounds.is_none());
let struct_def = cx.tcx.def_map.borrow().get_copy(&path_node_id);
let struct_did = ast_util::def_id_of_def(struct_def);
check_struct_safe_for_destructor(cx, self_type.span, struct_did);
}
_ => {
cx.tcx.sess.span_bug(self_type.span,
"the self type for the Drop trait impl is not a path");
}
}
}
}
fn check_item(cx: &mut Context, item: &Item) {
    if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
ItemImpl(_, Some(ref trait_ref), self_type, _) => {
check_impl_of_trait(cx, item, trait_ref, self_type);
}
_ => {}
}
}
visit::walk_item(cx, item, ());
}
// Yields the appropriate function to check the kind of closed over
// variables. `id` is the NodeId for some expression that creates the
// closure.
fn with_appropriate_checker(cx: &Context,
id: NodeId,
b: |checker: |&Context, @freevar_entry||) {
fn check_for_uniq(cx: &Context, fv: &freevar_entry, bounds: ty::BuiltinBounds) {
// all captured data must be owned, regardless of whether it is
// moved in or copied in.
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
check_freevar_bounds(cx, fv.span, var_t, bounds, None);
}
fn check_for_block(cx: &Context, fv: &freevar_entry,
bounds: ty::BuiltinBounds, region: ty::Region) {
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
// FIXME(#3569): Figure out whether the implicit borrow is actually
// mutable. Currently we assume all upvars are referenced mutably.
let implicit_borrowed_type = ty::mk_mut_rptr(cx.tcx, region, var_t);
check_freevar_bounds(cx, fv.span, implicit_borrowed_type,
bounds, Some(var_t));
}
fn check_for_bare(cx: &Context, fv: @freevar_entry) {
cx.tcx.sess.span_err(
fv.span,
"can't capture dynamic environment in a fn item; \
            use the || { ... } closure form instead");
} // same check is done in resolve.rs, but shouldn't be done
let fty = ty::node_id_to_type(cx.tcx, id);
match ty::get(fty).sty {
ty::ty_closure(~ty::ClosureTy {
sigil: OwnedSigil,
bounds: bounds,
..
}) => {
b(|cx, fv| check_for_uniq(cx, fv, bounds))
}
ty::ty_closure(~ty::ClosureTy {
sigil: ManagedSigil,
..
}) => {
// can't happen
fail!("internal error: saw closure with managed sigil (@fn)");
}
ty::ty_closure(~ty::ClosureTy {
sigil: BorrowedSigil,
bounds: bounds,
region: region,
..
}) => {
b(|cx, fv| check_for_block(cx, fv, bounds, region))
}
ty::ty_bare_fn(_) => {
b(check_for_bare)
}
ref s => {
cx.tcx.sess.bug(
format!("expect fn type in kind checker, not {:?}", s));
}
}
}
// Check that the free variables used in a shared/sendable closure conform
// to the copy/move kind bounds. Then recursively check the function body.
fn check_fn(
cx: &mut Context,
fk: &visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
fn_id: NodeId) {
// Check kinds on free variables:
with_appropriate_checker(cx, fn_id, |chk| {
let r = freevars::get_freevars(cx.tcx, fn_id);
for fv in r.iter() {
chk(cx, *fv);
}
});
visit::walk_fn(cx, fk, decl, body, sp, fn_id, ());
}
pub fn check_expr(cx: &mut Context, e: &Expr) {
debug!("kind::check_expr({})", expr_to_str(e));
// Handle any kind bounds on type parameters
{
let method_map = cx.method_map.borrow();
let method = method_map.find(&typeck::MethodCall::expr(e.id));
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = match method {
Some(method) => Some(&method.substs.tps),
None => node_type_substs.find(&e.id)
};
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let type_param_defs = match e.node {
ExprPath(_) => {
let did = ast_util::def_id_of_def(def_map.get_copy(&e.id));
ty::lookup_item_type(cx.tcx, did).generics.type_param_defs.clone()
}
_ => {
// Type substitutions should only occur on paths and
// method calls, so this needs to be a method call.
// Even though the callee_id may have been the id with
// node_type_substs, e.id is correct here.
match method {
Some(method) => {
ty::method_call_type_param_defs(cx.tcx, method.origin)
}
None =>
|
}
}
};
            if ts.len() != type_param_defs.len() {
// Fail earlier to make debugging easier
fail!("internal error: in kind::check_expr, length \
mismatch between actual and declared bounds: actual = \
{}, declared = {}",
ts.repr(cx.tcx),
type_param_defs.repr(cx.tcx));
}
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, e.span, ty, type_param_def)
}
}
}
match e.node {
ExprUnary(UnBox, interior) => {
let interior_type = ty::expr_ty(cx.tcx, interior);
let _ = check_static(cx.tcx, interior_type, interior.span);
}
ExprCast(source, _) => {
let source_ty = ty::expr_ty(cx.tcx, source);
let target_ty = ty::expr_ty(cx.tcx, e);
check_trait_cast(cx, source_ty, target_ty, source.span);
}
ExprRepeat(element, count_expr) => {
let count = ty::eval_repeat_count(cx.tcx, count_expr);
if count > 1 {
let element_ty = ty::expr_ty(cx.tcx, element);
check_copy(cx, element_ty, element.span,
"repeated element will be copied");
}
}
_ => {}
}
// Search for auto-adjustments to find trait coercions.
match cx.tcx.adjustments.borrow().find(&e.id) {
Some(adjustment) => {
match **adjustment {
ty::AutoObject(..) => {
let source_ty = ty::expr_ty(cx.tcx, e);
let target_ty = ty::expr_ty_adjusted(cx.tcx, e,
&*cx.method_map.borrow());
check_trait_cast(cx, source_ty, target_ty, e.span);
}
ty::AutoAddEnv(..) |
ty::AutoDerefRef(..) => {}
}
}
None => {}
}
visit::walk_expr(cx, e, ());
}
fn check_trait_cast(cx: &mut Context, source_ty: ty::t, target_ty: ty::t, span: Span) {
check_cast_for_escaping_regions(cx, source_ty, target_ty, span);
match ty::get(target_ty).sty {
ty::ty_trait(~ty::TyTrait { bounds,.. }) => {
check_trait_cast_bounds(cx, span, source_ty, bounds);
}
_ => {}
}
}
fn check_ty(cx: &mut Context, aty: &Ty) {
match aty.node {
TyPath(_, _, id) => {
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = node_type_substs.find(&id);
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let did = ast_util::def_id_of_def(def_map.get_copy(&id));
let generics = ty::lookup_item_type(cx.tcx, did).generics;
let type_param_defs = generics.type_param_defs();
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, aty.span, ty, type_param_def)
}
}
}
_ => {}
}
visit::walk_ty(cx, aty, ());
}
// Calls "any_missing" if any bounds were missing.
pub fn check_builtin_bounds(cx: &Context,
ty: ty::t,
bounds: ty::BuiltinBounds,
any_missing: |ty::BuiltinBounds|) {
let kind = ty::type_contents(cx.tcx, ty);
let mut missing = ty::EmptyBuiltinBounds();
for bound in bounds.iter() {
        if !kind.meets_bound(cx.tcx, bound) {
            missing.add(bound);
        }
    }
    if !missing.is_empty() {
any_missing(missing);
}
}
pub fn check_typaram_bounds(cx: &Context,
sp: Span,
ty: ty::t,
type_param_def: &ty::TypeParameterDef) {
check_builtin_bounds(cx,
ty,
type_param_def.bounds.builtin_bounds,
|missing| {
cx.tcx.sess.span_err(
sp,
format!("instantiating a type parameter with an incompatible type \
`{}`, which does not fulfill `{}`",
ty_to_str(cx.tcx, ty),
missing.user_string(cx.tcx)));
});
}
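// Hedged illustration (modern syntax, hypothetical names) of the check in
// `check_typaram_bounds` above: instantiating a type parameter with a type
// that does not meet its builtin bound is an error at the use site.
fn needs_send<T: Send>(_value: T) {}

fn bounds_demo() {
    needs_send(42u32);                    // ok: `u32` is `Send`
    // needs_send(std::rc::Rc::new(1));   // error: `Rc<i32>` does not fulfil `Send`
}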
pub fn check_freevar_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds, referenced_ty: Option<ty::t>)
{
check_builtin_bounds(cx, ty, bounds, |missing| {
// Will be Some if the freevar is implicitly borrowed (stack closure).
// Emit a less mysterious error message in this case.
match referenced_ty {
Some(rty) => cx.tcx.sess.span_err(sp,
format!("cannot implicitly borrow variable of type `{}` in a bounded \
stack closure (implicit reference does not fulfill `{}`)",
ty_to_str(cx.tcx, rty), missing.user_string(cx.tcx))),
None => cx.tcx.sess.span_err(sp,
format!("cannot capture variable of type `{}`, which does \
not fulfill `{}`, in a bounded closure",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx))),
}
cx.tcx.sess.span_note(
sp,
format!("this closure's environment must satisfy `{}`",
bounds.user_string(cx.tcx)));
});
}
pub fn check_trait_cast_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds) {
check_builtin_bounds(cx, ty, bounds, |missing| {
cx.tcx.sess.span_err(sp,
format!("cannot pack type `{}`, which does not fulfill \
`{}`, as a trait bounded by {}",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx),
bounds.user_string(cx.tcx)));
});
}
fn check_copy(cx: &Context, ty: ty::t, sp: Span, reason: &str) {
debug!("type_contents({})={}",
ty_to_str(cx.tcx, ty),
ty::type_contents(cx.tcx, ty).to_str());
if ty::type_moves_by_default(cx.tcx, ty) {
cx.tcx.sess.span_err(
sp, format!("copying a value of non-copyable type `{}`",
ty_to_str(cx.tcx, ty)));
cx.tcx.sess.span_note(sp, format!("{}", reason));
}
}
pub fn check_static(tcx: &ty::ctxt, ty: ty::t, sp: Span) -> bool {
    if !ty::type_is_static(tcx, ty) {
match ty::get(ty).sty {
ty::ty_param(..) => {
tcx.sess.span_err(sp,
format!("value may contain references; \
add `'static` bound to `{}`", ty_to_str(tcx, ty)));
}
_ => {
tcx.sess.span_err(sp, "value may contain references");
}
}
false
} else {
true
}
}
/// This is rather subtle. When we are casting a value to an instantiated
/// trait like `a as trait<'r>`, regionck already ensures that any references
/// that appear in the type of `a` are bounded by `'r` (ed.: rem
/// FIXME(#5723)). However, it is possible that there are *type parameters*
/// in the type of `a`, and those *type parameters* may have references
/// within them. We have to guarantee that the regions which appear in those
/// type parameters are not obscured.
///
/// Therefore, we ensure that one of three conditions holds:
///
/// (1) The trait instance cannot escape the current fn. This is
/// guaranteed if the region bound `&r` is some scope within the fn
/// itself. This case is safe because whatever references are
/// found within the type parameter, they must enclose the fn body
|
{
cx.tcx.sess.span_bug(e.span,
"non path/method call expr has type substs??");
}
|
conditional_block
|
kind.rs
|
b: &Block, s: Span, n: NodeId, _: ()) {
check_fn(self, fk, fd, b, s, n);
}
fn visit_ty(&mut self, t: &Ty, _: ()) {
check_ty(self, t);
}
fn visit_item(&mut self, i: &Item, _: ()) {
check_item(self, i);
}
}
pub fn check_crate(tcx: &ty::ctxt,
method_map: typeck::MethodMap,
krate: &Crate) {
let mut ctx = Context {
tcx: tcx,
method_map: method_map,
};
visit::walk_crate(&mut ctx, krate, ());
tcx.sess.abort_if_errors();
}
fn check_struct_safe_for_destructor(cx: &mut Context,
span: Span,
struct_did: DefId) {
let struct_tpt = ty::lookup_item_type(cx.tcx, struct_did);
    if !struct_tpt.generics.has_type_params() {
        let struct_ty = ty::mk_struct(cx.tcx, struct_did, ty::substs {
            regions: ty::NonerasedRegions(OwnedSlice::empty()),
            self_ty: None,
            tps: Vec::new()
        });
        if !ty::type_is_sendable(cx.tcx, struct_ty) {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a \
structure that does not satisfy Send");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
} else {
cx.tcx.sess.span_err(span,
"cannot implement a destructor on a structure \
with type parameters");
cx.tcx.sess.span_note(span,
"use \"#[unsafe_destructor]\" on the \
implementation to force the compiler to \
allow this");
}
}
fn check_impl_of_trait(cx: &mut Context, it: &Item, trait_ref: &TraitRef, self_type: &Ty) {
let ast_trait_def = *cx.tcx.def_map.borrow()
.find(&trait_ref.ref_id)
.expect("trait ref not in def map!");
let trait_def_id = ast_util::def_id_of_def(ast_trait_def);
let trait_def = *cx.tcx.trait_defs.borrow()
.find(&trait_def_id)
.expect("trait def not in trait-defs map!");
// If this trait has builtin-kind supertraits, meet them.
let self_ty: ty::t = ty::node_id_to_type(cx.tcx, it.id);
debug!("checking impl with self type {:?}", ty::get(self_ty).sty);
check_builtin_bounds(cx, self_ty, trait_def.bounds, |missing| {
cx.tcx.sess.span_err(self_type.span,
format!("the type `{}', which does not fulfill `{}`, cannot implement this \
trait", ty_to_str(cx.tcx, self_ty), missing.user_string(cx.tcx)));
cx.tcx.sess.span_note(self_type.span,
format!("types implementing this trait must fulfill `{}`",
trait_def.bounds.user_string(cx.tcx)));
});
// If this is a destructor, check kinds.
if cx.tcx.lang_items.drop_trait() == Some(trait_def_id) {
match self_type.node {
TyPath(_, ref bounds, path_node_id) => {
assert!(bounds.is_none());
let struct_def = cx.tcx.def_map.borrow().get_copy(&path_node_id);
let struct_did = ast_util::def_id_of_def(struct_def);
check_struct_safe_for_destructor(cx, self_type.span, struct_did);
}
_ => {
cx.tcx.sess.span_bug(self_type.span,
"the self type for the Drop trait impl is not a path");
}
}
}
}
fn check_item(cx: &mut Context, item: &Item) {
    if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
ItemImpl(_, Some(ref trait_ref), self_type, _) => {
check_impl_of_trait(cx, item, trait_ref, self_type);
}
_ => {}
}
}
visit::walk_item(cx, item, ());
}
// Yields the appropriate function to check the kind of closed over
// variables. `id` is the NodeId for some expression that creates the
// closure.
fn with_appropriate_checker(cx: &Context,
id: NodeId,
b: |checker: |&Context, @freevar_entry||) {
fn check_for_uniq(cx: &Context, fv: &freevar_entry, bounds: ty::BuiltinBounds) {
// all captured data must be owned, regardless of whether it is
// moved in or copied in.
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
check_freevar_bounds(cx, fv.span, var_t, bounds, None);
}
fn check_for_block(cx: &Context, fv: &freevar_entry,
bounds: ty::BuiltinBounds, region: ty::Region) {
let id = ast_util::def_id_of_def(fv.def).node;
let var_t = ty::node_id_to_type(cx.tcx, id);
// FIXME(#3569): Figure out whether the implicit borrow is actually
// mutable. Currently we assume all upvars are referenced mutably.
let implicit_borrowed_type = ty::mk_mut_rptr(cx.tcx, region, var_t);
check_freevar_bounds(cx, fv.span, implicit_borrowed_type,
bounds, Some(var_t));
}
fn check_for_bare(cx: &Context, fv: @freevar_entry) {
cx.tcx.sess.span_err(
fv.span,
"can't capture dynamic environment in a fn item; \
            use the || { ... } closure form instead");
} // same check is done in resolve.rs, but shouldn't be done
let fty = ty::node_id_to_type(cx.tcx, id);
match ty::get(fty).sty {
ty::ty_closure(~ty::ClosureTy {
sigil: OwnedSigil,
bounds: bounds,
..
}) => {
b(|cx, fv| check_for_uniq(cx, fv, bounds))
}
ty::ty_closure(~ty::ClosureTy {
sigil: ManagedSigil,
..
}) => {
// can't happen
fail!("internal error: saw closure with managed sigil (@fn)");
}
ty::ty_closure(~ty::ClosureTy {
sigil: BorrowedSigil,
bounds: bounds,
region: region,
..
}) => {
b(|cx, fv| check_for_block(cx, fv, bounds, region))
}
ty::ty_bare_fn(_) => {
b(check_for_bare)
}
ref s => {
cx.tcx.sess.bug(
format!("expect fn type in kind checker, not {:?}", s));
}
}
}
// Check that the free variables used in a shared/sendable closure conform
// to the copy/move kind bounds. Then recursively check the function body.
fn check_fn(
cx: &mut Context,
fk: &visit::FnKind,
decl: &FnDecl,
body: &Block,
sp: Span,
fn_id: NodeId) {
// Check kinds on free variables:
with_appropriate_checker(cx, fn_id, |chk| {
let r = freevars::get_freevars(cx.tcx, fn_id);
for fv in r.iter() {
chk(cx, *fv);
}
});
visit::walk_fn(cx, fk, decl, body, sp, fn_id, ());
}
pub fn check_expr(cx: &mut Context, e: &Expr) {
debug!("kind::check_expr({})", expr_to_str(e));
// Handle any kind bounds on type parameters
{
let method_map = cx.method_map.borrow();
let method = method_map.find(&typeck::MethodCall::expr(e.id));
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = match method {
Some(method) => Some(&method.substs.tps),
None => node_type_substs.find(&e.id)
};
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let type_param_defs = match e.node {
ExprPath(_) => {
let did = ast_util::def_id_of_def(def_map.get_copy(&e.id));
ty::lookup_item_type(cx.tcx, did).generics.type_param_defs.clone()
}
_ => {
// Type substitutions should only occur on paths and
// method calls, so this needs to be a method call.
// Even though the callee_id may have been the id with
// node_type_substs, e.id is correct here.
match method {
Some(method) => {
ty::method_call_type_param_defs(cx.tcx, method.origin)
}
None => {
cx.tcx.sess.span_bug(e.span,
"non path/method call expr has type substs??");
}
}
}
};
            if ts.len() != type_param_defs.len() {
// Fail earlier to make debugging easier
fail!("internal error: in kind::check_expr, length \
mismatch between actual and declared bounds: actual = \
{}, declared = {}",
ts.repr(cx.tcx),
type_param_defs.repr(cx.tcx));
}
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, e.span, ty, type_param_def)
}
}
}
match e.node {
ExprUnary(UnBox, interior) => {
let interior_type = ty::expr_ty(cx.tcx, interior);
let _ = check_static(cx.tcx, interior_type, interior.span);
}
ExprCast(source, _) => {
let source_ty = ty::expr_ty(cx.tcx, source);
let target_ty = ty::expr_ty(cx.tcx, e);
check_trait_cast(cx, source_ty, target_ty, source.span);
}
ExprRepeat(element, count_expr) => {
let count = ty::eval_repeat_count(cx.tcx, count_expr);
if count > 1 {
let element_ty = ty::expr_ty(cx.tcx, element);
check_copy(cx, element_ty, element.span,
"repeated element will be copied");
}
}
_ => {}
}
// Search for auto-adjustments to find trait coercions.
match cx.tcx.adjustments.borrow().find(&e.id) {
Some(adjustment) => {
match **adjustment {
ty::AutoObject(..) => {
let source_ty = ty::expr_ty(cx.tcx, e);
let target_ty = ty::expr_ty_adjusted(cx.tcx, e,
&*cx.method_map.borrow());
check_trait_cast(cx, source_ty, target_ty, e.span);
}
ty::AutoAddEnv(..) |
ty::AutoDerefRef(..) => {}
}
}
None => {}
}
visit::walk_expr(cx, e, ());
}
fn check_trait_cast(cx: &mut Context, source_ty: ty::t, target_ty: ty::t, span: Span) {
check_cast_for_escaping_regions(cx, source_ty, target_ty, span);
match ty::get(target_ty).sty {
ty::ty_trait(~ty::TyTrait { bounds,.. }) => {
check_trait_cast_bounds(cx, span, source_ty, bounds);
}
_ => {}
}
}
fn check_ty(cx: &mut Context, aty: &Ty) {
match aty.node {
TyPath(_, _, id) => {
let node_type_substs = cx.tcx.node_type_substs.borrow();
let r = node_type_substs.find(&id);
for ts in r.iter() {
let def_map = cx.tcx.def_map.borrow();
let did = ast_util::def_id_of_def(def_map.get_copy(&id));
let generics = ty::lookup_item_type(cx.tcx, did).generics;
let type_param_defs = generics.type_param_defs();
for (&ty, type_param_def) in ts.iter().zip(type_param_defs.iter()) {
check_typaram_bounds(cx, aty.span, ty, type_param_def)
}
|
}
// Calls "any_missing" if any bounds were missing.
pub fn check_builtin_bounds(cx: &Context,
ty: ty::t,
bounds: ty::BuiltinBounds,
any_missing: |ty::BuiltinBounds|) {
let kind = ty::type_contents(cx.tcx, ty);
let mut missing = ty::EmptyBuiltinBounds();
for bound in bounds.iter() {
if !kind.meets_bound(cx.tcx, bound) {
missing.add(bound);
}
}
if !missing.is_empty() {
any_missing(missing);
}
}
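// Usage note: the callers just below -- check_typaram_bounds,
// check_freevar_bounds and check_trait_cast_bounds -- each pass an
// `any_missing` closure that turns the set of missing bounds into a
// context-specific diagnostic.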
pub fn check_typaram_bounds(cx: &Context,
sp: Span,
ty: ty::t,
type_param_def: &ty::TypeParameterDef) {
check_builtin_bounds(cx,
ty,
type_param_def.bounds.builtin_bounds,
|missing| {
cx.tcx.sess.span_err(
sp,
format!("instantiating a type parameter with an incompatible type \
`{}`, which does not fulfill `{}`",
ty_to_str(cx.tcx, ty),
missing.user_string(cx.tcx)));
});
}
pub fn check_freevar_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds, referenced_ty: Option<ty::t>)
{
check_builtin_bounds(cx, ty, bounds, |missing| {
// Will be Some if the freevar is implicitly borrowed (stack closure).
// Emit a less mysterious error message in this case.
match referenced_ty {
Some(rty) => cx.tcx.sess.span_err(sp,
format!("cannot implicitly borrow variable of type `{}` in a bounded \
stack closure (implicit reference does not fulfill `{}`)",
ty_to_str(cx.tcx, rty), missing.user_string(cx.tcx))),
None => cx.tcx.sess.span_err(sp,
format!("cannot capture variable of type `{}`, which does \
not fulfill `{}`, in a bounded closure",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx))),
}
cx.tcx.sess.span_note(
sp,
format!("this closure's environment must satisfy `{}`",
bounds.user_string(cx.tcx)));
});
}
pub fn check_trait_cast_bounds(cx: &Context, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds) {
check_builtin_bounds(cx, ty, bounds, |missing| {
cx.tcx.sess.span_err(sp,
format!("cannot pack type `{}`, which does not fulfill \
`{}`, as a trait bounded by {}",
ty_to_str(cx.tcx, ty), missing.user_string(cx.tcx),
bounds.user_string(cx.tcx)));
});
}
fn check_copy(cx: &Context, ty: ty::t, sp: Span, reason: &str) {
debug!("type_contents({})={}",
ty_to_str(cx.tcx, ty),
ty::type_contents(cx.tcx, ty).to_str());
if ty::type_moves_by_default(cx.tcx, ty) {
cx.tcx.sess.span_err(
sp, format!("copying a value of non-copyable type `{}`",
ty_to_str(cx.tcx, ty)));
cx.tcx.sess.span_note(sp, format!("{}", reason));
}
}
pub fn check_static(tcx: &ty::ctxt, ty: ty::t, sp: Span) -> bool {
if !ty::type_is_static(tcx, ty) {
match ty::get(ty).sty {
ty::ty_param(..) => {
tcx.sess.span_err(sp,
format!("value may contain references; \
add `'static` bound to `{}`", ty_to_str(tcx, ty)));
}
_ => {
tcx.sess.span_err(sp, "value may contain references");
}
}
false
} else {
true
}
}
/// This is rather subtle. When we are casting a value to an instantiated
/// trait like `a as trait<'r>`, regionck already ensures that any references
/// that appear in the type of `a` are bounded by `'r` (ed.: rem
/// FIXME(#5723)). However, it is possible that there are *type parameters*
/// in the type of `a`, and those *type parameters* may have references
/// within them. We have to guarantee that the regions which appear in those
/// type parameters are not obscured.
///
/// Therefore, we ensure that one of three conditions holds:
///
/// (1) The trait instance cannot escape the current fn. This is
/// guaranteed if the region bound `&r` is some scope within the fn
/// itself. This case is safe because whatever references are
/// found within the type parameter, they must enclose the fn body
///
|
}
}
_ => {}
}
visit::walk_ty(cx, aty, ());
|
random_line_split
|
datum.rs
|
.fcx.schedule_drop_mem(scope, scratch, ty);
DatumBlock(bcx, Datum(scratch, ty, Lvalue))
}
pub fn rvalue_scratch_datum(bcx: &Block,
ty: ty::t,
name: &str)
-> Datum<Rvalue> {
/*!
* Allocates temporary space on the stack using alloca() and
* returns a by-ref Datum pointing to it. If `zero` is true, the
* space will be zeroed when it is allocated; this is normally not
* necessary, but in the case of automatic rooting in match
* statements it is possible to have temporaries that may not get
* initialized if a certain arm is not taken, so we must zero
* them. You must arrange any cleanups etc yourself!
*/
let llty = type_of::type_of(bcx.ccx(), ty);
let scratch = alloca_maybe_zeroed(bcx, llty, name, false);
Datum(scratch, ty, Rvalue(ByRef))
}
pub fn appropriate_rvalue_mode(ccx: &CrateContext, ty: ty::t) -> RvalueMode {
/*!
* Indicates the "appropriate" mode for this value,
* which is either by ref or by value, depending
* on whether type is immediate or not.
*/
if type_is_zero_size(ccx, ty) {
ByValue
} else if type_is_immediate(ccx, ty) {
ByValue
} else {
ByRef
}
}
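// Added note: zero-sized types and immediates (scalars and the like, per
// type_is_immediate) are returned ByValue; everything else is handled
// ByRef, i.e. behind a pointer to its memory.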
fn add_rvalue_clean(mode: RvalueMode,
fcx: &FunctionContext,
scope: cleanup::ScopeId,
val: ValueRef,
ty: ty::t) {
match mode {
ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
ByRef => { fcx.schedule_drop_mem(scope, val, ty); }
}
}
pub trait KindOps {
/**
* Take appropriate action after the value in `datum` has been
* stored to a new location.
*/
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a>;
/**
* True if this mode is a reference mode, meaning that the datum's
* val field is a pointer to the actual value
*/
fn is_by_ref(&self) -> bool;
/**
* Converts to an Expr kind
*/
fn to_expr_kind(self) -> Expr;
}
impl KindOps for Rvalue {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
_val: ValueRef,
_ty: ty::t)
-> &'a Block<'a> {
// No cleanup is scheduled for an rvalue, so we don't have
// to do anything after a move to cancel or duplicate it.
bcx
}
fn is_by_ref(&self) -> bool {
self.mode == ByRef
}
fn to_expr_kind(self) -> Expr {
RvalueExpr(self)
}
}
impl KindOps for Lvalue {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a> {
/*!
* If an lvalue is moved, we must zero out the memory in which
* it resides so as to cancel cleanup. If an @T lvalue is
* copied, we must increment the reference count.
*/
if ty::type_needs_drop(bcx.tcx(), ty) {
if ty::type_moves_by_default(bcx.tcx(), ty) {
// cancel cleanup of affine values by zeroing out
let () = zero_mem(bcx, val, ty);
bcx
} else {
// incr. refcount for @T or newtype'd @T
glue::take_ty(bcx, val, ty)
}
} else {
bcx
}
}
fn is_by_ref(&self) -> bool {
true
}
fn to_expr_kind(self) -> Expr {
LvalueExpr
}
}
impl KindOps for Expr {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a> {
match *self {
LvalueExpr => Lvalue.post_store(bcx, val, ty),
RvalueExpr(ref r) => r.post_store(bcx, val, ty),
}
}
fn is_by_ref(&self) -> bool {
match *self {
LvalueExpr => Lvalue.is_by_ref(),
RvalueExpr(ref r) => r.is_by_ref()
}
}
fn to_expr_kind(self) -> Expr {
self
}
}
impl Datum<Rvalue> {
pub fn add_clean(self,
fcx: &FunctionContext,
scope: cleanup::ScopeId)
-> ValueRef {
/*!
* Schedules a cleanup for this datum in the given scope.
* That means that this datum is no longer an rvalue datum;
* hence, this function consumes the datum and returns the
* contained ValueRef.
*/
add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
self.val
}
pub fn to_lvalue_datum_in_scope<'a>(self,
bcx: &'a Block<'a>,
name: &str,
scope: cleanup::ScopeId)
-> DatumBlock<'a, Lvalue> {
/*!
* Returns an lvalue datum (that is, a by ref datum with
* cleanup scheduled). If `self` is not already an lvalue,
* cleanup will be scheduled in the temporary scope for `expr_id`.
*/
let fcx = bcx.fcx;
match self.kind.mode {
ByRef => {
add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
DatumBlock(bcx, Datum(self.val, self.ty, Lvalue))
}
ByValue => {
lvalue_scratch_datum(
bcx, self.ty, name, false, scope, self,
|this, bcx, llval| this.store_to(bcx, llval))
}
}
}
pub fn to_ref_datum<'a>(self, bcx: &'a Block<'a>) -> DatumBlock<'a, Rvalue> {
let mut bcx = bcx;
match self.kind.mode {
ByRef => DatumBlock(bcx, self),
ByValue => {
let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
bcx = self.store_to(bcx, scratch.val);
DatumBlock(bcx, scratch)
}
}
}
pub fn to_appropriate_datum<'a>(self,
bcx: &'a Block<'a>)
-> DatumBlock<'a, Rvalue> {
match self.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
self.to_ref_datum(bcx)
}
ByValue => {
match self.kind.mode {
ByValue => DatumBlock(bcx, self),
ByRef => {
let llval = load(bcx, self.val, self.ty);
DatumBlock(bcx, Datum(llval, self.ty, Rvalue(ByValue)))
}
}
}
}
}
}
/**
* Methods suitable for "expr" datums that could be either lvalues or
* rvalues. These include coercions into lvalues/rvalues but also a number
* of more general operations. (Some of those operations could be moved to
* the more general `impl<K> Datum<K>`, but it's convenient to have them
* here since we can `match self.kind` rather than having to implement
* generic methods in `KindOps`.)
*/
impl Datum<Expr> {
fn match_kind<R>(self,
if_lvalue: |Datum<Lvalue>| -> R,
if_rvalue: |Datum<Rvalue>| -> R)
-> R {
let Datum { val, ty, kind } = self;
match kind {
LvalueExpr => if_lvalue(Datum(val, ty, Lvalue)),
RvalueExpr(r) => if_rvalue(Datum(val, ty, r)),
}
}
#[allow(dead_code)] // potentially useful
pub fn assert_lvalue(self, bcx: &Block) -> Datum<Lvalue> {
/*!
* Asserts that this datum *is* an lvalue and returns it.
*/
self.match_kind(
|d| d,
|_| bcx.sess().bug("assert_lvalue given rvalue"))
}
pub fn assert_rvalue(self, bcx: &Block) -> Datum<Rvalue> {
/*!
* Asserts that this datum *is* an rvalue and returns it.
*/
self.match_kind(
|_| bcx.sess().bug("assert_rvalue given lvalue"),
|r| r)
}
pub fn store_to_dest<'a>(self,
bcx: &'a Block<'a>,
dest: expr::Dest,
expr_id: ast::NodeId)
-> &'a Block<'a> {
match dest {
expr::Ignore => {
self.add_clean_if_rvalue(bcx, expr_id);
bcx
}
expr::SaveIn(addr) => {
self.store_to(bcx, addr)
}
}
}
pub fn add_clean_if_rvalue<'a>(self,
bcx: &'a Block<'a>,
expr_id: ast::NodeId) {
/*!
* Arranges cleanup for `self` if it is an rvalue. Use when
* you are done working with a value that may need drop.
*/
self.match_kind(
|_| { /* Nothing to do, cleanup already arranged */ },
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.add_clean(bcx.fcx, scope);
})
}
pub fn clean<'a>(self,
bcx: &'a Block<'a>,
name: &'static str,
expr_id: ast::NodeId)
-> &'a Block<'a> {
/*!
* Ensures that `self` will get cleaned up, if it is not an lvalue
* already.
*/
self.to_lvalue_datum(bcx, name, expr_id).bcx
}
pub fn to_lvalue_datum<'a>(self,
bcx: &'a Block<'a>,
name: &str,
expr_id: ast::NodeId)
-> DatumBlock<'a, Lvalue> {
self.match_kind(
|l| DatumBlock(bcx, l),
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.to_lvalue_datum_in_scope(bcx, name, scope)
})
}
pub fn to_rvalue_datum<'a>(self,
bcx: &'a Block<'a>,
name: &'static str)
-> DatumBlock<'a, Rvalue> {
/*!
* Ensures that we have an rvalue datum (that is, a datum with
* no cleanup scheduled).
*/
self.match_kind(
|l| {
let mut bcx = bcx;
match l.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
let scratch = rvalue_scratch_datum(bcx, l.ty, name);
bcx = l.store_to(bcx, scratch.val);
DatumBlock(bcx, scratch)
}
ByValue => {
let v = load(bcx, l.val, l.ty);
bcx = l.kind.post_store(bcx, l.val, l.ty);
DatumBlock(bcx, Datum(v, l.ty, Rvalue(ByValue)))
}
}
},
|r| DatumBlock(bcx, r))
}
}
/**
* Methods suitable only for lvalues. These include the various
* operations to extract components out of compound data structures,
* such as extracting the field from a struct or a particular element
* from an array.
*/
impl Datum<Lvalue> {
pub fn to_llref(self) -> ValueRef {
/*!
* Converts a datum into a by-ref value. The datum type must
* be one which is always passed by reference.
*/
self.val
}
pub fn get_element(&self,
ty: ty::t,
gep: |ValueRef| -> ValueRef)
-> Datum<Lvalue> {
Datum {
val: gep(self.val),
kind: Lvalue,
ty: ty,
}
}
pub fn get_vec_base_and_len<'a>(&self, bcx: &'a Block<'a>) -> (ValueRef, ValueRef) {
//! Converts a vector into the slice pair.
tvec::get_base_and_len(bcx, self.val, self.ty)
}
}
fn load<'a>(bcx: &'a Block<'a>, llptr: ValueRef, ty: ty::t) -> ValueRef {
/*!
* Private helper for loading from a by-ref datum. Handles various
* special cases where the type gives us better information about
* what we are loading.
*/
if type_is_zero_size(bcx.ccx(), ty) {
C_undef(type_of::type_of(bcx.ccx(), ty))
} else if ty::type_is_bool(ty) {
LoadRangeAssert(bcx, llptr, 0, 2, lib::llvm::False)
} else if ty::type_is_char(ty) {
// a char is a unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
LoadRangeAssert(bcx, llptr, 0, 0x10FFFF + 1, lib::llvm::False)
} else {
Load(bcx, llptr)
}
}
/**
* Generic methods applicable to any sort of datum.
*/
impl<K:KindOps> Datum<K> {
pub fn to_expr_datum(self) -> Datum<Expr> {
let Datum { val, ty, kind } = self;
Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
}
pub fn store_to<'a>(self,
bcx: &'a Block<'a>,
dst: ValueRef)
-> &'a Block<'a> {
/*!
* Moves or copies this value into a new home, as appropriate
* depending on the type of the datum. This method consumes
* the datum, since it would be incorrect to go on using the
* datum if the value represented is affine (and hence the value
* is moved).
*/
self.shallow_copy(bcx, dst);
self.kind.post_store(bcx, self.val, self.ty)
}
fn shallow_copy<'a>(&self,
bcx: &'a Block<'a>,
dst: ValueRef)
-> &'a Block<'a> {
/*!
* Helper function that performs a shallow copy of this value
* into `dst`, which should be a pointer to a memory location
* suitable for `self.ty`. `dst` should contain uninitialized
* memory (either newly allocated, zeroed, or dropped).
*
* This function is private to datums because it leaves memory
* in an unstable state, where the source value has been
* copied but not zeroed. Public methods are `store_to` (if
* you no longer need the source value) or
* `shallow_copy_and_take` (if you wish the source value to
* remain valid).
*/
let _icx = push_ctxt("copy_to_no_check");
if type_is_zero_size(bcx.ccx(), self.ty) {
return bcx;
}
if self.kind.is_by_ref() {
memcpy_ty(bcx, dst, self.val, self.ty);
} else {
Store(bcx, self.val, dst);
}
return bcx;
}
pub fn shallow_copy_and_take<'a>(&self,
bcx: &'a Block<'a>,
dst: ValueRef)
-> &'a Block<'a> {
/*!
* Copies the value into a new location and runs any necessary
* take glue on the new location. This function always
* preserves the existing datum as a valid value. Therefore,
* it does not consume `self` and, also, cannot be applied to
* affine values (since they must never be duplicated).
*/
assert!(!ty::type_moves_by_default(bcx.tcx(), self.ty));
let mut bcx = bcx;
bcx = self.shallow_copy(bcx, dst);
glue::take_ty(bcx, dst, self.ty)
|
}
|
random_line_split
|
|
datum.rs
|
Datum<K> {
Datum { val: val, ty: ty, kind: kind }
}
pub fn DatumBlock<'a, K>(bcx: &'a Block<'a>,
datum: Datum<K>)
-> DatumBlock<'a, K> {
DatumBlock { bcx: bcx, datum: datum }
}
pub fn immediate_rvalue(val: ValueRef, ty: ty::t) -> Datum<Rvalue> {
return Datum(val, ty, Rvalue(ByValue));
}
pub fn immediate_rvalue_bcx<'a>(bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> DatumBlock<'a, Rvalue> {
return DatumBlock(bcx, immediate_rvalue(val, ty))
}
pub fn lvalue_scratch_datum<'a, A>(bcx: &'a Block<'a>,
ty: ty::t,
name: &str,
zero: bool,
scope: cleanup::ScopeId,
arg: A,
populate: |A, &'a Block<'a>, ValueRef|
-> &'a Block<'a>)
-> DatumBlock<'a, Lvalue> {
/*!
* Allocates temporary space on the stack using alloca() and
* returns a by-ref Datum pointing to it. The memory will be
* dropped upon exit from `scope`. The callback `populate` should
* initialize the memory. If `zero` is true, the space will be
* zeroed when it is allocated; this is not necessary unless `bcx`
* does not dominate the end of `scope`.
*/
let llty = type_of::type_of(bcx.ccx(), ty);
let scratch = alloca_maybe_zeroed(bcx, llty, name, zero);
// Subtle. Populate the scratch memory *before* scheduling cleanup.
let bcx = populate(arg, bcx, scratch);
bcx.fcx.schedule_drop_mem(scope, scratch, ty);
DatumBlock(bcx, Datum(scratch, ty, Lvalue))
}
pub fn rvalue_scratch_datum(bcx: &Block,
ty: ty::t,
name: &str)
-> Datum<Rvalue> {
/*!
* Allocates temporary space on the stack using alloca() and
* returns a by-ref Datum pointing to it. If `zero` is true, the
* space will be zeroed when it is allocated; this is normally not
* necessary, but in the case of automatic rooting in match
* statements it is possible to have temporaries that may not get
* initialized if a certain arm is not taken, so we must zero
* them. You must arrange any cleanups etc yourself!
*/
let llty = type_of::type_of(bcx.ccx(), ty);
let scratch = alloca_maybe_zeroed(bcx, llty, name, false);
Datum(scratch, ty, Rvalue(ByRef))
}
pub fn appropriate_rvalue_mode(ccx: &CrateContext, ty: ty::t) -> RvalueMode {
/*!
* Indicates the "appropriate" mode for this value,
* which is either by ref or by value, depending
* on whether type is immediate or not.
*/
if type_is_zero_size(ccx, ty) {
ByValue
} else if type_is_immediate(ccx, ty) {
ByValue
} else {
ByRef
}
}
fn add_rvalue_clean(mode: RvalueMode,
fcx: &FunctionContext,
scope: cleanup::ScopeId,
val: ValueRef,
ty: ty::t) {
match mode {
ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
ByRef => { fcx.schedule_drop_mem(scope, val, ty); }
}
}
pub trait KindOps {
/**
* Take appropriate action after the value in `datum` has been
* stored to a new location.
*/
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a>;
/**
* True if this mode is a reference mode, meaning that the datum's
* val field is a pointer to the actual value
*/
fn is_by_ref(&self) -> bool;
/**
* Converts to an Expr kind
*/
fn to_expr_kind(self) -> Expr;
}
impl KindOps for Rvalue {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
_val: ValueRef,
_ty: ty::t)
-> &'a Block<'a> {
// No cleanup is scheduled for an rvalue, so we don't have
// to do anything after a move to cancel or duplicate it.
bcx
}
fn is_by_ref(&self) -> bool {
self.mode == ByRef
}
fn to_expr_kind(self) -> Expr {
RvalueExpr(self)
}
}
impl KindOps for Lvalue {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a> {
/*!
* If an lvalue is moved, we must zero out the memory in which
* it resides so as to cancel cleanup. If an @T lvalue is
* copied, we must increment the reference count.
*/
if ty::type_needs_drop(bcx.tcx(), ty) {
if ty::type_moves_by_default(bcx.tcx(), ty) {
// cancel cleanup of affine values by zeroing out
let () = zero_mem(bcx, val, ty);
bcx
} else {
// incr. refcount for @T or newtype'd @T
glue::take_ty(bcx, val, ty)
}
} else {
bcx
}
}
fn is_by_ref(&self) -> bool {
true
}
fn to_expr_kind(self) -> Expr {
LvalueExpr
}
}
impl KindOps for Expr {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a> {
match *self {
LvalueExpr => Lvalue.post_store(bcx, val, ty),
RvalueExpr(ref r) => r.post_store(bcx, val, ty),
}
}
fn is_by_ref(&self) -> bool {
match *self {
LvalueExpr => Lvalue.is_by_ref(),
RvalueExpr(ref r) => r.is_by_ref()
}
}
fn to_expr_kind(self) -> Expr {
self
}
}
impl Datum<Rvalue> {
pub fn add_clean(self,
fcx: &FunctionContext,
scope: cleanup::ScopeId)
-> ValueRef {
/*!
* Schedules a cleanup for this datum in the given scope.
* That means that this datum is no longer an rvalue datum;
* hence, this function consumes the datum and returns the
* contained ValueRef.
*/
add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
self.val
}
pub fn to_lvalue_datum_in_scope<'a>(self,
bcx: &'a Block<'a>,
name: &str,
scope: cleanup::ScopeId)
-> DatumBlock<'a, Lvalue> {
/*!
* Returns an lvalue datum (that is, a by ref datum with
* cleanup scheduled). If `self` is not already an lvalue,
* cleanup will be scheduled in the temporary scope for `expr_id`.
*/
let fcx = bcx.fcx;
match self.kind.mode {
ByRef => {
add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
DatumBlock(bcx, Datum(self.val, self.ty, Lvalue))
}
ByValue => {
lvalue_scratch_datum(
bcx, self.ty, name, false, scope, self,
|this, bcx, llval| this.store_to(bcx, llval))
}
}
}
pub fn to_ref_datum<'a>(self, bcx: &'a Block<'a>) -> DatumBlock<'a, Rvalue> {
let mut bcx = bcx;
match self.kind.mode {
ByRef => DatumBlock(bcx, self),
ByValue => {
let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
bcx = self.store_to(bcx, scratch.val);
DatumBlock(bcx, scratch)
}
}
}
pub fn to_appropriate_datum<'a>(self,
bcx: &'a Block<'a>)
-> DatumBlock<'a, Rvalue> {
match self.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
self.to_ref_datum(bcx)
}
ByValue => {
match self.kind.mode {
ByValue => DatumBlock(bcx, self),
ByRef => {
let llval = load(bcx, self.val, self.ty);
DatumBlock(bcx, Datum(llval, self.ty, Rvalue(ByValue)))
}
}
}
}
}
}
/**
* Methods suitable for "expr" datums that could be either lvalues or
* rvalues. These include coercions into lvalues/rvalues but also a number
* of more general operations. (Some of those operations could be moved to
* the more general `impl<K> Datum<K>`, but it's convenient to have them
* here since we can `match self.kind` rather than having to implement
* generic methods in `KindOps`.)
*/
impl Datum<Expr> {
fn match_kind<R>(self,
if_lvalue: |Datum<Lvalue>| -> R,
if_rvalue: |Datum<Rvalue>| -> R)
-> R {
let Datum { val, ty, kind } = self;
match kind {
LvalueExpr => if_lvalue(Datum(val, ty, Lvalue)),
RvalueExpr(r) => if_rvalue(Datum(val, ty, r)),
}
}
#[allow(dead_code)] // potentially useful
pub fn assert_lvalue(self, bcx: &Block) -> Datum<Lvalue> {
/*!
* Asserts that this datum *is* an lvalue and returns it.
*/
self.match_kind(
|d| d,
|_| bcx.sess().bug("assert_lvalue given rvalue"))
}
pub fn assert_rvalue(self, bcx: &Block) -> Datum<Rvalue> {
/*!
* Asserts that this datum *is* an rvalue and returns it.
*/
self.match_kind(
|_| bcx.sess().bug("assert_rvalue given lvalue"),
|r| r)
}
pub fn store_to_dest<'a>(self,
bcx: &'a Block<'a>,
dest: expr::Dest,
expr_id: ast::NodeId)
-> &'a Block<'a> {
match dest {
expr::Ignore => {
self.add_clean_if_rvalue(bcx, expr_id);
bcx
}
expr::SaveIn(addr) => {
self.store_to(bcx, addr)
}
}
}
pub fn add_clean_if_rvalue<'a>(self,
bcx: &'a Block<'a>,
expr_id: ast::NodeId) {
/*!
* Arranges cleanup for `self` if it is an rvalue. Use when
* you are done working with a value that may need drop.
*/
self.match_kind(
|_| { /* Nothing to do, cleanup already arranged */ },
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.add_clean(bcx.fcx, scope);
})
}
pub fn clean<'a>(self,
bcx: &'a Block<'a>,
name: &'static str,
expr_id: ast::NodeId)
-> &'a Block<'a> {
/*!
* Ensures that `self` will get cleaned up, if it is not an lvalue
* already.
*/
self.to_lvalue_datum(bcx, name, expr_id).bcx
}
pub fn to_lvalue_datum<'a>(self,
bcx: &'a Block<'a>,
name: &str,
expr_id: ast::NodeId)
-> DatumBlock<'a, Lvalue> {
self.match_kind(
|l| DatumBlock(bcx, l),
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.to_lvalue_datum_in_scope(bcx, name, scope)
})
}
pub fn to_rvalue_datum<'a>(self,
bcx: &'a Block<'a>,
name: &'static str)
-> DatumBlock<'a, Rvalue> {
/*!
* Ensures that we have an rvalue datum (that is, a datum with
* no cleanup scheduled).
*/
self.match_kind(
|l| {
let mut bcx = bcx;
match l.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
let scratch = rvalue_scratch_datum(bcx, l.ty, name);
bcx = l.store_to(bcx, scratch.val);
DatumBlock(bcx, scratch)
}
ByValue => {
let v = load(bcx, l.val, l.ty);
bcx = l.kind.post_store(bcx, l.val, l.ty);
DatumBlock(bcx, Datum(v, l.ty, Rvalue(ByValue)))
}
}
},
|r| DatumBlock(bcx, r))
}
}
/**
* Methods suitable only for lvalues. These include the various
* operations to extract components out of compound data structures,
* such as extracting the field from a struct or a particular element
* from an array.
*/
impl Datum<Lvalue> {
pub fn
|
(self) -> ValueRef {
/*!
* Converts a datum into a by-ref value. The datum type must
* be one which is always passed by reference.
*/
self.val
}
pub fn get_element(&self,
ty: ty::t,
gep: |ValueRef| -> ValueRef)
-> Datum<Lvalue> {
Datum {
val: gep(self.val),
kind: Lvalue,
ty: ty,
}
}
pub fn get_vec_base_and_len<'a>(&self, bcx: &'a Block<'a>) -> (ValueRef, ValueRef) {
//! Converts a vector into the slice pair.
tvec::get_base_and_len(bcx, self.val, self.ty)
}
}
fn load<'a>(bcx: &'a Block<'a>, llptr: ValueRef, ty: ty::t) -> ValueRef {
/*!
* Private helper for loading from a by-ref datum. Handles various
* special cases where the type gives us better information about
* what we are loading.
*/
if type_is_zero_size(bcx.ccx(), ty) {
C_undef(type_of::type_of(bcx.ccx(), ty))
} else if ty::type_is_bool(ty) {
LoadRangeAssert(bcx, llptr, 0, 2, lib::llvm::False)
} else if ty::type_is_char(ty) {
// a char is a unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
LoadRangeAssert(bcx, llptr, 0, 0x10FFFF + 1, lib::llvm::False)
} else {
Load(bcx, llptr)
}
}
/**
* Generic methods applicable to any sort of datum.
*/
impl<K:KindOps> Datum<K> {
pub fn to_expr_datum(self) -> Datum<Expr> {
let Datum { val, ty, kind } = self;
Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
}
pub fn store_to<'a>(self,
bcx: &'a Block<'a>,
dst: ValueRef)
-> &'a Block<'a> {
/*!
* Moves or copies this value into a new home, as appropriate
* depending on the type of the datum. This method consumes
* the datum, since it would be incorrect to go on using the
* datum if the value represented is affine (and hence the value
* is moved).
*/
self.shallow_copy(bcx, dst);
self.kind.post_store(bcx, self.val, self
|
to_llref
|
identifier_name
|
datum.rs
|
Datum<K> {
Datum { val: val, ty: ty, kind: kind }
}
pub fn DatumBlock<'a, K>(bcx: &'a Block<'a>,
datum: Datum<K>)
-> DatumBlock<'a, K> {
DatumBlock { bcx: bcx, datum: datum }
}
pub fn immediate_rvalue(val: ValueRef, ty: ty::t) -> Datum<Rvalue> {
return Datum(val, ty, Rvalue(ByValue));
}
pub fn immediate_rvalue_bcx<'a>(bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> DatumBlock<'a, Rvalue> {
return DatumBlock(bcx, immediate_rvalue(val, ty))
}
pub fn lvalue_scratch_datum<'a, A>(bcx: &'a Block<'a>,
ty: ty::t,
name: &str,
zero: bool,
scope: cleanup::ScopeId,
arg: A,
populate: |A, &'a Block<'a>, ValueRef|
-> &'a Block<'a>)
-> DatumBlock<'a, Lvalue> {
/*!
* Allocates temporary space on the stack using alloca() and
* returns a by-ref Datum pointing to it. The memory will be
* dropped upon exit from `scope`. The callback `populate` should
* initialize the memory. If `zero` is true, the space will be
* zeroed when it is allocated; this is not necessary unless `bcx`
* does not dominate the end of `scope`.
*/
let llty = type_of::type_of(bcx.ccx(), ty);
let scratch = alloca_maybe_zeroed(bcx, llty, name, zero);
// Subtle. Populate the scratch memory *before* scheduling cleanup.
let bcx = populate(arg, bcx, scratch);
bcx.fcx.schedule_drop_mem(scope, scratch, ty);
DatumBlock(bcx, Datum(scratch, ty, Lvalue))
}
pub fn rvalue_scratch_datum(bcx: &Block,
ty: ty::t,
name: &str)
-> Datum<Rvalue> {
/*!
* Allocates temporary space on the stack using alloca() and
* returns a by-ref Datum pointing to it. If `zero` is true, the
* space will be zeroed when it is allocated; this is normally not
* necessary, but in the case of automatic rooting in match
* statements it is possible to have temporaries that may not get
* initialized if a certain arm is not taken, so we must zero
* them. You must arrange any cleanups etc yourself!
*/
let llty = type_of::type_of(bcx.ccx(), ty);
let scratch = alloca_maybe_zeroed(bcx, llty, name, false);
Datum(scratch, ty, Rvalue(ByRef))
}
pub fn appropriate_rvalue_mode(ccx: &CrateContext, ty: ty::t) -> RvalueMode {
/*!
* Indicates the "appropriate" mode for this value,
* which is either by ref or by value, depending
* on whether type is immediate or not.
*/
if type_is_zero_size(ccx, ty) {
ByValue
} else if type_is_immediate(ccx, ty) {
ByValue
} else {
ByRef
}
}
fn add_rvalue_clean(mode: RvalueMode,
fcx: &FunctionContext,
scope: cleanup::ScopeId,
val: ValueRef,
ty: ty::t) {
match mode {
ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
ByRef => { fcx.schedule_drop_mem(scope, val, ty); }
}
}
pub trait KindOps {
/**
* Take appropriate action after the value in `datum` has been
* stored to a new location.
*/
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a>;
/**
* True if this mode is a reference mode, meaning that the datum's
* val field is a pointer to the actual value
*/
fn is_by_ref(&self) -> bool;
/**
* Converts to an Expr kind
*/
fn to_expr_kind(self) -> Expr;
}
impl KindOps for Rvalue {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
_val: ValueRef,
_ty: ty::t)
-> &'a Block<'a> {
// No cleanup is scheduled for an rvalue, so we don't have
// to do anything after a move to cancel or duplicate it.
bcx
}
fn is_by_ref(&self) -> bool {
self.mode == ByRef
}
fn to_expr_kind(self) -> Expr {
RvalueExpr(self)
}
}
impl KindOps for Lvalue {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a> {
/*!
* If an lvalue is moved, we must zero out the memory in which
* it resides so as to cancel cleanup. If an @T lvalue is
* copied, we must increment the reference count.
*/
if ty::type_needs_drop(bcx.tcx(), ty) {
if ty::type_moves_by_default(bcx.tcx(), ty) {
// cancel cleanup of affine values by zeroing out
let () = zero_mem(bcx, val, ty);
bcx
} else {
// incr. refcount for @T or newtype'd @T
glue::take_ty(bcx, val, ty)
}
} else {
bcx
}
}
fn is_by_ref(&self) -> bool {
true
}
fn to_expr_kind(self) -> Expr {
LvalueExpr
}
}
impl KindOps for Expr {
fn post_store<'a>(&self,
bcx: &'a Block<'a>,
val: ValueRef,
ty: ty::t)
-> &'a Block<'a> {
match *self {
LvalueExpr => Lvalue.post_store(bcx, val, ty),
RvalueExpr(ref r) => r.post_store(bcx, val, ty),
}
}
fn is_by_ref(&self) -> bool {
match *self {
LvalueExpr => Lvalue.is_by_ref(),
RvalueExpr(ref r) => r.is_by_ref()
}
}
fn to_expr_kind(self) -> Expr {
self
}
}
impl Datum<Rvalue> {
pub fn add_clean(self,
fcx: &FunctionContext,
scope: cleanup::ScopeId)
-> ValueRef {
/*!
* Schedules a cleanup for this datum in the given scope.
* That means that this datum is no longer an rvalue datum;
* hence, this function consumes the datum and returns the
* contained ValueRef.
*/
add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
self.val
}
pub fn to_lvalue_datum_in_scope<'a>(self,
bcx: &'a Block<'a>,
name: &str,
scope: cleanup::ScopeId)
-> DatumBlock<'a, Lvalue> {
/*!
* Returns an lvalue datum (that is, a by ref datum with
* cleanup scheduled). If `self` is not already an lvalue,
* cleanup will be scheduled in the temporary scope for `expr_id`.
*/
let fcx = bcx.fcx;
match self.kind.mode {
ByRef => {
add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
DatumBlock(bcx, Datum(self.val, self.ty, Lvalue))
}
ByValue => {
lvalue_scratch_datum(
bcx, self.ty, name, false, scope, self,
|this, bcx, llval| this.store_to(bcx, llval))
}
}
}
pub fn to_ref_datum<'a>(self, bcx: &'a Block<'a>) -> DatumBlock<'a, Rvalue> {
let mut bcx = bcx;
match self.kind.mode {
ByRef => DatumBlock(bcx, self),
ByValue => {
let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
bcx = self.store_to(bcx, scratch.val);
DatumBlock(bcx, scratch)
}
}
}
pub fn to_appropriate_datum<'a>(self,
bcx: &'a Block<'a>)
-> DatumBlock<'a, Rvalue> {
match self.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
self.to_ref_datum(bcx)
}
ByValue => {
match self.kind.mode {
ByValue => DatumBlock(bcx, self),
ByRef => {
let llval = load(bcx, self.val, self.ty);
DatumBlock(bcx, Datum(llval, self.ty, Rvalue(ByValue)))
}
}
}
}
}
}
/**
* Methods suitable for "expr" datums that could be either lvalues or
* rvalues. These include coercions into lvalues/rvalues but also a number
* of more general operations. (Some of those operations could be moved to
* the more general `impl<K> Datum<K>`, but it's convenient to have them
* here since we can `match self.kind` rather than having to implement
* generic methods in `KindOps`.)
*/
impl Datum<Expr> {
fn match_kind<R>(self,
if_lvalue: |Datum<Lvalue>| -> R,
if_rvalue: |Datum<Rvalue>| -> R)
-> R {
let Datum { val, ty, kind } = self;
match kind {
LvalueExpr => if_lvalue(Datum(val, ty, Lvalue)),
RvalueExpr(r) => if_rvalue(Datum(val, ty, r)),
}
}
#[allow(dead_code)] // potentially useful
pub fn assert_lvalue(self, bcx: &Block) -> Datum<Lvalue> {
/*!
* Asserts that this datum *is* an lvalue and returns it.
*/
self.match_kind(
|d| d,
|_| bcx.sess().bug("assert_lvalue given rvalue"))
}
pub fn assert_rvalue(self, bcx: &Block) -> Datum<Rvalue> {
/*!
* Asserts that this datum *is* an rvalue and returns it.
*/
self.match_kind(
|_| bcx.sess().bug("assert_rvalue given lvalue"),
|r| r)
}
pub fn store_to_dest<'a>(self,
bcx: &'a Block<'a>,
dest: expr::Dest,
expr_id: ast::NodeId)
-> &'a Block<'a> {
match dest {
expr::Ignore => {
self.add_clean_if_rvalue(bcx, expr_id);
bcx
}
expr::SaveIn(addr) => {
self.store_to(bcx, addr)
}
}
}
pub fn add_clean_if_rvalue<'a>(self,
bcx: &'a Block<'a>,
expr_id: ast::NodeId) {
/*!
* Arranges cleanup for `self` if it is an rvalue. Use when
* you are done working with a value that may need drop.
*/
self.match_kind(
|_| { /* Nothing to do, cleanup already arranged */ },
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.add_clean(bcx.fcx, scope);
})
}
pub fn clean<'a>(self,
bcx: &'a Block<'a>,
name: &'static str,
expr_id: ast::NodeId)
-> &'a Block<'a> {
/*!
* Ensures that `self` will get cleaned up, if it is not an lvalue
* already.
*/
self.to_lvalue_datum(bcx, name, expr_id).bcx
}
pub fn to_lvalue_datum<'a>(self,
bcx: &'a Block<'a>,
name: &str,
expr_id: ast::NodeId)
-> DatumBlock<'a, Lvalue>
|
pub fn to_rvalue_datum<'a>(self,
bcx: &'a Block<'a>,
name: &'static str)
-> DatumBlock<'a, Rvalue> {
/*!
* Ensures that we have an rvalue datum (that is, a datum with
* no cleanup scheduled).
*/
self.match_kind(
|l| {
let mut bcx = bcx;
match l.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
let scratch = rvalue_scratch_datum(bcx, l.ty, name);
bcx = l.store_to(bcx, scratch.val);
DatumBlock(bcx, scratch)
}
ByValue => {
let v = load(bcx, l.val, l.ty);
bcx = l.kind.post_store(bcx, l.val, l.ty);
DatumBlock(bcx, Datum(v, l.ty, Rvalue(ByValue)))
}
}
},
|r| DatumBlock(bcx, r))
}
}
/**
* Methods suitable only for lvalues. These include the various
* operations to extract components out of compound data structures,
* such as extracting the field from a struct or a particular element
* from an array.
*/
impl Datum<Lvalue> {
pub fn to_llref(self) -> ValueRef {
/*!
* Converts a datum into a by-ref value. The datum type must
* be one which is always passed by reference.
*/
self.val
}
pub fn get_element(&self,
ty: ty::t,
gep: |ValueRef| -> ValueRef)
-> Datum<Lvalue> {
Datum {
val: gep(self.val),
kind: Lvalue,
ty: ty,
}
}
pub fn get_vec_base_and_len<'a>(&self, bcx: &'a Block<'a>) -> (ValueRef, ValueRef) {
//! Converts a vector into the slice pair.
tvec::get_base_and_len(bcx, self.val, self.ty)
}
}
fn load<'a>(bcx: &'a Block<'a>, llptr: ValueRef, ty: ty::t) -> ValueRef {
/*!
* Private helper for loading from a by-ref datum. Handles various
* special cases where the type gives us better information about
* what we are loading.
*/
if type_is_zero_size(bcx.ccx(), ty) {
C_undef(type_of::type_of(bcx.ccx(), ty))
} else if ty::type_is_bool(ty) {
LoadRangeAssert(bcx, llptr, 0, 2, lib::llvm::False)
} else if ty::type_is_char(ty) {
// a char is a unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
LoadRangeAssert(bcx, llptr, 0, 0x10FFFF + 1, lib::llvm::False)
} else {
Load(bcx, llptr)
}
}
/**
* Generic methods applicable to any sort of datum.
*/
impl<K:KindOps> Datum<K> {
pub fn to_expr_datum(self) -> Datum<Expr> {
let Datum { val, ty, kind } = self;
Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
}
pub fn store_to<'a>(self,
bcx: &'a Block<'a>,
dst: ValueRef)
-> &'a Block<'a> {
/*!
* Moves or copies this value into a new home, as appropriate
* depending on the type of the datum. This method consumes
* the datum, since it would be incorrect to go on using the
* datum if the value represented is affine (and hence the value
* is moved).
*/
self.shallow_copy(bcx, dst);
self.kind.post_store(bcx, self.val, self
|
{
self.match_kind(
|l| DatumBlock(bcx, l),
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.to_lvalue_datum_in_scope(bcx, name, scope)
})
}
|
identifier_body
|
utils.rs
|
pub fn char_width(c: char, is_cjk: bool, tab_width: usize, position: usize) -> Option<usize> {
use unicode_width::UnicodeWidthChar;
if c == '\t' {
Some(tab_width - position % tab_width)
} else if c == '\n' {
Some(1)
} else if is_cjk {
UnicodeWidthChar::width_cjk(c)
} else {
UnicodeWidthChar::width(c)
}
}
pub fn str_width(s: &str, is_cjk: bool, tab_width: usize) -> usize {
s.chars().fold(0, |acc, c|
acc + char_width(c, is_cjk, tab_width, acc).unwrap_or(0)
)
}
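// Worked example (added for illustration): with tab_width == 4 and
// is_cjk == false, str_width("a\tb", false, 4) folds to
// 1 ('a') + 3 (the tab at column 1: 4 - 1 % 4) + 1 ('b') == 5.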
/// Determine if a given char is alphanumeric or an underscore
pub fn is_alpha_or_(c: char) -> bool
|
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_alpha_or_() {
assert!(is_alpha_or_('a'));
assert!(is_alpha_or_('5'));
assert!(is_alpha_or_('_'));
}
}
|
{
c.is_alphanumeric() || c == '_'
}
|
identifier_body
|
utils.rs
|
pub fn char_width(c: char, is_cjk: bool, tab_width: usize, position: usize) -> Option<usize> {
use unicode_width::UnicodeWidthChar;
if c == '\t' {
Some(tab_width - position % tab_width)
} else if c == '\n' {
Some(1)
|
UnicodeWidthChar::width(c)
}
}
pub fn str_width(s: &str, is_cjk: bool, tab_width: usize) -> usize {
s.chars().fold(0, |acc, c|
acc + char_width(c, is_cjk, tab_width, acc).unwrap_or(0)
)
}
/// Determine if a given char is alphanumeric or an underscore
pub fn is_alpha_or_(c: char) -> bool {
c.is_alphanumeric() || c == '_'
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_alpha_or_() {
assert!(is_alpha_or_('a'));
assert!(is_alpha_or_('5'));
assert!(is_alpha_or_('_'));
}
}
|
} else if is_cjk {
UnicodeWidthChar::width_cjk(c)
} else {
|
random_line_split
|
utils.rs
|
pub fn
|
(c: char, is_cjk: bool, tab_width: usize, position: usize) -> Option<usize> {
use unicode_width::UnicodeWidthChar;
if c == '\t' {
Some(tab_width - position % tab_width)
} else if c == '\n' {
Some(1)
} else if is_cjk {
UnicodeWidthChar::width_cjk(c)
} else {
UnicodeWidthChar::width(c)
}
}
pub fn str_width(s: &str, is_cjk: bool, tab_width: usize) -> usize {
s.chars().fold(0, |acc, c|
acc + char_width(c, is_cjk, tab_width, acc).unwrap_or(0)
)
}
/// Determine if a given char is alphanumeric or an underscore
pub fn is_alpha_or_(c: char) -> bool {
c.is_alphanumeric() || c == '_'
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_alpha_or_() {
assert!(is_alpha_or_('a'));
assert!(is_alpha_or_('5'));
assert!(is_alpha_or_('_'));
}
}
|
char_width
|
identifier_name
|
mir_codegen_switch.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
enum Abc {
A(u8),
B(i8),
C,
D,
}
fn foo(x: Abc) -> i32 {
match x {
Abc::C => 3,
Abc::D => 4,
Abc::B(_) => 2,
Abc::A(_) => 1,
}
}
fn
|
(x: Abc) -> bool {
match x {
Abc::D => true,
_ => false
}
}
fn main() {
assert_eq!(1, foo(Abc::A(42)));
assert_eq!(2, foo(Abc::B(-100)));
assert_eq!(3, foo(Abc::C));
assert_eq!(4, foo(Abc::D));
assert_eq!(false, foo2(Abc::A(1)));
assert_eq!(false, foo2(Abc::B(2)));
assert_eq!(false, foo2(Abc::C));
assert_eq!(true, foo2(Abc::D));
}
|
foo2
|
identifier_name
|
mir_codegen_switch.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
enum Abc {
A(u8),
B(i8),
C,
D,
}
fn foo(x: Abc) -> i32 {
match x {
Abc::C => 3,
Abc::D => 4,
Abc::B(_) => 2,
Abc::A(_) => 1,
}
}
fn foo2(x: Abc) -> bool
|
fn main() {
assert_eq!(1, foo(Abc::A(42)));
assert_eq!(2, foo(Abc::B(-100)));
assert_eq!(3, foo(Abc::C));
assert_eq!(4, foo(Abc::D));
assert_eq!(false, foo2(Abc::A(1)));
assert_eq!(false, foo2(Abc::B(2)));
assert_eq!(false, foo2(Abc::C));
assert_eq!(true, foo2(Abc::D));
}
|
{
match x {
Abc::D => true,
_ => false
}
}
|
identifier_body
|
mir_codegen_switch.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
enum Abc {
A(u8),
B(i8),
C,
D,
}
fn foo(x: Abc) -> i32 {
match x {
Abc::C => 3,
Abc::D => 4,
Abc::B(_) => 2,
|
match x {
Abc::D => true,
_ => false
}
}
fn main() {
assert_eq!(1, foo(Abc::A(42)));
assert_eq!(2, foo(Abc::B(-100)));
assert_eq!(3, foo(Abc::C));
assert_eq!(4, foo(Abc::D));
assert_eq!(false, foo2(Abc::A(1)));
assert_eq!(false, foo2(Abc::B(2)));
assert_eq!(false, foo2(Abc::C));
assert_eq!(true, foo2(Abc::D));
}
|
Abc::A(_) => 1,
}
}
fn foo2(x: Abc) -> bool {
|
random_line_split
|
reader.rs
|
use std::io::Read;
use byteorder::{BigEndian, LittleEndian};
use crate::errors::PcapError;
use crate::pcapng::blocks::{ParsedBlock, EnhancedPacketBlock, InterfaceDescriptionBlock};
use crate::Endianness;
use crate::peek_reader::PeekReader;
use crate::pcapng::{Block, SectionHeaderBlock, BlockType};
/// Wraps another reader and uses it to read a PcapNg formatted stream.
///
/// It implements the Iterator trait in order to read one block at a time, except for the first SectionHeaderBlock, which is parsed when the reader is created
///
/// # Examples
///
/// ```rust,no_run
/// use std::fs::File;
/// use pcap_file::pcapng::PcapNgReader;
///
/// let file_in = File::open("test.pcapng").expect("Error opening file");
/// let pcapng_reader = PcapNgReader::new(file_in).unwrap();
///
/// // Read test.pcapng
/// for block in pcapng_reader {
///
/// //Check if there is no error
/// let block = block.unwrap();
///
/// //Parse block content
/// let parsed_block = block.parsed().unwrap();
///
/// //Do something
/// }
/// ```
pub struct PcapNgReader<R: Read> {
reader: PeekReader<R>,
section: SectionHeaderBlock<'static>,
interfaces: Vec<InterfaceDescriptionBlock<'static>>
}
impl<R: Read> PcapNgReader<R> {
/// Creates a new `PcapNgReader` from a reader.
/// Parses the first block which must be a valid SectionHeaderBlock
pub fn new(mut reader: R) -> Result<PcapNgReader<R>, PcapError> {
let current_block = Block::from_reader::<_, BigEndian>(&mut reader)?;
let section = current_block.parsed()?;
let section = match section {
ParsedBlock::SectionHeader(section) => section.into_owned(),
_ => return Err(PcapError::InvalidField("SectionHeader missing"))
};
Ok(
PcapNgReader {
reader: PeekReader::new(reader),
section,
interfaces: vec![]
}
)
}
/// Returns the current SectionHeaderBlock
pub fn section(&self) -> &SectionHeaderBlock<'static> {
&self.section
}
/// Returns the current interfaces
pub fn interfaces(&self) -> &[InterfaceDescriptionBlock<'static>] {
&self.interfaces[..]
}
/// Returns the InterfaceDescriptionBlock corresponding to the given packet
pub fn
|
(&self, packet: &EnhancedPacketBlock) -> Option<&InterfaceDescriptionBlock> {
self.interfaces.get(packet.interface_id as usize)
}
fn next_impl(&mut self) -> Result<Block<'static>, PcapError> {
// Read next Block
let endianess = self.section.endianness();
let block = match endianess {
Endianness::Big => Block::from_reader::<_, BigEndian>(&mut self.reader)?,
Endianness::Little => Block::from_reader::<_, LittleEndian>(&mut self.reader)?
};
match block.type_ {
BlockType::SectionHeader => {
self.section = block.parsed()?.into_section_header().unwrap().into_owned();
self.interfaces.clear();
},
BlockType::InterfaceDescription => {
self.interfaces.push(block.parsed()?.into_interface_description().unwrap().into_owned())
},
_ => {}
}
Ok(block)
}
}
impl<R: Read> Iterator for PcapNgReader<R> {
type Item = Result<Block<'static>, PcapError>;
fn next(&mut self) -> Option<Self::Item> {
match self.reader.is_empty() {
Ok(is_empty) if is_empty => return None,
Err(err) => return Some(Err(err.into())),
_ => {}
}
Some(self.next_impl())
}
}
|
packet_interface
|
identifier_name
|
reader.rs
|
use std::io::Read;
use byteorder::{BigEndian, LittleEndian};
use crate::errors::PcapError;
use crate::pcapng::blocks::{ParsedBlock, EnhancedPacketBlock, InterfaceDescriptionBlock};
use crate::Endianness;
use crate::peek_reader::PeekReader;
use crate::pcapng::{Block, SectionHeaderBlock, BlockType};
/// Wraps another reader and uses it to read a PcapNg formatted stream.
///
/// It implements the Iterator trait in order to read one block at a time, except for the first SectionHeaderBlock, which is parsed when the reader is created
///
/// # Examples
///
/// ```rust,no_run
/// use std::fs::File;
/// use pcap_file::pcapng::PcapNgReader;
///
/// let file_in = File::open("test.pcapng").expect("Error opening file");
/// let pcapng_reader = PcapNgReader::new(file_in).unwrap();
///
/// // Read test.pcapng
/// for block in pcapng_reader {
///
/// //Check if there is no error
/// let block = block.unwrap();
///
/// //Parse block content
/// let parsed_block = block.parsed().unwrap();
///
/// //Do something
/// }
/// ```
pub struct PcapNgReader<R: Read> {
reader: PeekReader<R>,
section: SectionHeaderBlock<'static>,
interfaces: Vec<InterfaceDescriptionBlock<'static>>
}
impl<R: Read> PcapNgReader<R> {
/// Creates a new `PcapNgReader` from a reader.
/// Parses the first block which must be a valid SectionHeaderBlock
pub fn new(mut reader: R) -> Result<PcapNgReader<R>, PcapError>
|
/// Returns the current SectionHeaderBlock
pub fn section(&self) -> &SectionHeaderBlock<'static> {
&self.section
}
/// Returns the current interfaces
pub fn interfaces(&self) -> &[InterfaceDescriptionBlock<'static>] {
&self.interfaces[..]
}
/// Returns the InterfaceDescriptionBlock corresponding to the given packet
pub fn packet_interface(&self, packet: &EnhancedPacketBlock) -> Option<&InterfaceDescriptionBlock> {
self.interfaces.get(packet.interface_id as usize)
}
fn next_impl(&mut self) -> Result<Block<'static>, PcapError> {
// Read next Block
let endianess = self.section.endianness();
let block = match endianess {
Endianness::Big => Block::from_reader::<_, BigEndian>(&mut self.reader)?,
Endianness::Little => Block::from_reader::<_, LittleEndian>(&mut self.reader)?
};
match block.type_ {
BlockType::SectionHeader => {
self.section = block.parsed()?.into_section_header().unwrap().into_owned();
self.interfaces.clear();
},
BlockType::InterfaceDescription => {
self.interfaces.push(block.parsed()?.into_interface_description().unwrap().into_owned())
},
_ => {}
}
Ok(block)
}
}
impl<R: Read> Iterator for PcapNgReader<R> {
type Item = Result<Block<'static>, PcapError>;
fn next(&mut self) -> Option<Self::Item> {
match self.reader.is_empty() {
Ok(is_empty) if is_empty => return None,
Err(err) => return Some(Err(err.into())),
_ => {}
}
Some(self.next_impl())
}
}
|
{
let current_block = Block::from_reader::<_, BigEndian>(&mut reader)?;
let section = current_block.parsed()?;
let section = match section {
ParsedBlock::SectionHeader(section) => section.into_owned(),
_ => return Err(PcapError::InvalidField("SectionHeader missing"))
};
Ok(
PcapNgReader {
reader: PeekReader::new(reader),
section,
interfaces: vec![]
}
)
}
|
identifier_body
|
reader.rs
|
use std::io::Read;
use byteorder::{BigEndian, LittleEndian};
use crate::errors::PcapError;
use crate::pcapng::blocks::{ParsedBlock, EnhancedPacketBlock, InterfaceDescriptionBlock};
use crate::Endianness;
use crate::peek_reader::PeekReader;
use crate::pcapng::{Block, SectionHeaderBlock, BlockType};
/// Wraps another reader and uses it to read a PcapNg formatted stream.
///
/// It implements the Iterator trait in order to read one block at a time, except for the first SectionHeaderBlock, which is parsed when the reader is created
///
/// # Examples
///
/// ```rust,no_run
/// use std::fs::File;
/// use pcap_file::pcapng::PcapNgReader;
///
/// let file_in = File::open("test.pcapng").expect("Error opening file");
/// let pcapng_reader = PcapNgReader::new(file_in).unwrap();
///
/// // Read test.pcapng
/// for block in pcapng_reader {
///
/// //Check if there is no error
/// let block = block.unwrap();
///
/// //Parse block content
/// let parsed_block = block.parsed().unwrap();
///
/// //Do something
/// }
/// ```
pub struct PcapNgReader<R: Read> {
reader: PeekReader<R>,
section: SectionHeaderBlock<'static>,
interfaces: Vec<InterfaceDescriptionBlock<'static>>
}
impl<R: Read> PcapNgReader<R> {
/// Creates a new `PcapNgReader` from a reader.
/// Parses the first block which must be a valid SectionHeaderBlock
pub fn new(mut reader: R) -> Result<PcapNgReader<R>, PcapError> {
let current_block = Block::from_reader::<_, BigEndian>(&mut reader)?;
let section = current_block.parsed()?;
let section = match section {
ParsedBlock::SectionHeader(section) => section.into_owned(),
_ => return Err(PcapError::InvalidField("SectionHeader missing"))
};
Ok(
PcapNgReader {
reader: PeekReader::new(reader),
section,
interfaces: vec![]
}
)
}
/// Returns the current SectionHeaderBlock
pub fn section(&self) -> &SectionHeaderBlock<'static> {
&self.section
}
/// Returns the current interfaces
pub fn interfaces(&self) -> &[InterfaceDescriptionBlock<'static>] {
&self.interfaces[..]
}
/// Returns the InterfaceDescriptionBlock corresponding to the given packet
pub fn packet_interface(&self, packet: &EnhancedPacketBlock) -> Option<&InterfaceDescriptionBlock> {
self.interfaces.get(packet.interface_id as usize)
}
fn next_impl(&mut self) -> Result<Block<'static>, PcapError> {
// Read next Block
let endianess = self.section.endianness();
let block = match endianess {
Endianness::Big => Block::from_reader::<_, BigEndian>(&mut self.reader)?,
Endianness::Little => Block::from_reader::<_, LittleEndian>(&mut self.reader)?
};
match block.type_ {
BlockType::SectionHeader => {
self.section = block.parsed()?.into_section_header().unwrap().into_owned();
self.interfaces.clear();
},
BlockType::InterfaceDescription => {
self.interfaces.push(block.parsed()?.into_interface_description().unwrap().into_owned())
},
_ => {}
}
Ok(block)
}
}
impl<R: Read> Iterator for PcapNgReader<R> {
type Item = Result<Block<'static>, PcapError>;
fn next(&mut self) -> Option<Self::Item> {
match self.reader.is_empty() {
Ok(is_empty) if is_empty => return None,
Err(err) => return Some(Err(err.into())),
_ => {}
}
Some(self.next_impl())
|
}
}
|
random_line_split
|
|
insert_statement_does_not_support_returning_methods_on_sqlite.rs
|
#[macro_use]
extern crate diesel;
use diesel::*;
use diesel::sqlite::{Sqlite, SqliteQueryBuilder, SqliteConnection};
use diesel::backend::Backend;
use diesel::types::{Integer, VarChar};
table! {
users {
id -> Integer,
name -> VarChar,
}
}
pub struct User {
id: i32,
name: String,
}
use diesel::types::FromSqlRow;
impl<DB: Backend> Queryable<(Integer, VarChar), DB> for User where
(i32, String): FromSqlRow<(Integer, VarChar), DB>,
{
type Row = (i32, String);
fn build(row: Self::Row) -> Self {
User {
id: row.0,
name: row.1,
}
}
}
pub struct
|
(String);
Insertable! {
(users)
pub struct NewUser(#[column_name(name)] String,);
}
fn main() {
let connection = SqliteConnection::establish(":memory:").unwrap();
insert(&NewUser("Hello".into()))
.into(users::table)
.get_result::<User>(&connection);
//~^ ERROR: SupportsReturningClause
insert(&NewUser("Hello".into()))
.into(users::table)
.returning(users::name)
.get_result(&connection);
//~^ ERROR: SupportsReturningClause
}
|
NewUser
|
identifier_name
|
insert_statement_does_not_support_returning_methods_on_sqlite.rs
|
#[macro_use]
extern crate diesel;
use diesel::*;
use diesel::sqlite::{Sqlite, SqliteQueryBuilder, SqliteConnection};
|
table! {
users {
id -> Integer,
name -> VarChar,
}
}
pub struct User {
id: i32,
name: String,
}
use diesel::types::FromSqlRow;
impl<DB: Backend> Queryable<(Integer, VarChar), DB> for User where
(i32, String): FromSqlRow<(Integer, VarChar), DB>,
{
type Row = (i32, String);
fn build(row: Self::Row) -> Self {
User {
id: row.0,
name: row.1,
}
}
}
pub struct NewUser(String);
Insertable! {
(users)
pub struct NewUser(#[column_name(name)] String,);
}
fn main() {
let connection = SqliteConnection::establish(":memory:").unwrap();
insert(&NewUser("Hello".into()))
.into(users::table)
.get_result::<User>(&connection);
//~^ ERROR: SupportsReturningClause
insert(&NewUser("Hello".into()))
.into(users::table)
.returning(users::name)
.get_result(&connection);
//~^ ERROR: SupportsReturningClause
}
|
use diesel::backend::Backend;
use diesel::types::{Integer, VarChar};
|
random_line_split
|
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Translation of inline assembly.
*/
use lib;
use middle::trans::build::*;
use middle::trans::callee;
use middle::trans::common::*;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::expr;
use middle::trans::type_of;
use middle::trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'a>(bcx: &'a Block<'a>, ia: &ast::InlineAsm)
-> &'a Block<'a> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
// Prepare the output operands
let outputs = ia.outputs.iter().map(|&(ref c, ref out)| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
out_datum.val
}).collect::<Vec<_>>();
// Now the input operands
let inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints =
String::from_str(constraints.iter()
.map(|s| s.get().to_string())
.collect::<Vec<String>>()
.connect(",")
.as_slice());
let mut clobbers = get_clobbers();
if !ia.clobbers.get().is_empty() && !clobbers.is_empty() {
clobbers = format!("{},{}", ia.clobbers.get(), clobbers);
} else {
clobbers.push_str(ia.clobbers.get());
}
// Add the clobbers to our constraints list
if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push_char(',');
constraints.push_str(clobbers.as_slice());
} else
|
debug!("Asm Constraints: {:?}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
*output_types.get(0)
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
let dialect = match ia.dialect {
ast::AsmAtt => lib::llvm::AD_ATT,
ast::AsmIntel => lib::llvm::AD_Intel
};
let r = ia.asm.get().with_c_str(|a| {
constraints.as_slice().with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1 {
Store(bcx, r, *outputs.get(0));
} else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(target_arch = "arm")]
#[cfg(target_arch = "mips")]
#[cfg(target_arch = "mipsel")]
fn get_clobbers() -> String {
"".to_string()
}
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
{
constraints.push_str(clobbers.as_slice());
}
|
conditional_block
|
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Translation of inline assembly.
*/
use lib;
use middle::trans::build::*;
use middle::trans::callee;
use middle::trans::common::*;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::expr;
use middle::trans::type_of;
use middle::trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'a>(bcx: &'a Block<'a>, ia: &ast::InlineAsm)
-> &'a Block<'a> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
// Prepare the output operands
let outputs = ia.outputs.iter().map(|&(ref c, ref out)| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
out_datum.val
}).collect::<Vec<_>>();
// Now the input operands
let inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
|
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints =
String::from_str(constraints.iter()
.map(|s| s.get().to_string())
.collect::<Vec<String>>()
.connect(",")
.as_slice());
let mut clobbers = get_clobbers();
    if !ia.clobbers.get().is_empty() && !clobbers.is_empty() {
clobbers = format!("{},{}", ia.clobbers.get(), clobbers);
} else {
clobbers.push_str(ia.clobbers.get());
}
// Add the clobbers to our constraints list
    if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push_char(',');
constraints.push_str(clobbers.as_slice());
} else {
constraints.push_str(clobbers.as_slice());
}
debug!("Asm Constraints: {:?}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
*output_types.get(0)
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
let dialect = match ia.dialect {
ast::AsmAtt => lib::llvm::AD_ATT,
ast::AsmIntel => lib::llvm::AD_Intel
};
let r = ia.asm.get().with_c_str(|a| {
constraints.as_slice().with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1 {
Store(bcx, r, *outputs.get(0));
} else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(target_arch = "arm")]
#[cfg(target_arch = "mips")]
#[cfg(target_arch = "mipsel")]
fn get_clobbers() -> String {
"".to_string()
}
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
constraints.push((*c).clone());
|
random_line_split
|
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Translation of inline assembly.
*/
use lib;
use middle::trans::build::*;
use middle::trans::callee;
use middle::trans::common::*;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::expr;
use middle::trans::type_of;
use middle::trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'a>(bcx: &'a Block<'a>, ia: &ast::InlineAsm)
-> &'a Block<'a> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
// Prepare the output operands
let outputs = ia.outputs.iter().map(|&(ref c, ref out)| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
out_datum.val
}).collect::<Vec<_>>();
// Now the input operands
let inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints =
String::from_str(constraints.iter()
.map(|s| s.get().to_string())
.collect::<Vec<String>>()
.connect(",")
.as_slice());
let mut clobbers = get_clobbers();
    if !ia.clobbers.get().is_empty() && !clobbers.is_empty() {
clobbers = format!("{},{}", ia.clobbers.get(), clobbers);
} else {
clobbers.push_str(ia.clobbers.get());
}
// Add the clobbers to our constraints list
    if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push_char(',');
constraints.push_str(clobbers.as_slice());
} else {
constraints.push_str(clobbers.as_slice());
}
debug!("Asm Constraints: {:?}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
*output_types.get(0)
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
let dialect = match ia.dialect {
ast::AsmAtt => lib::llvm::AD_ATT,
ast::AsmIntel => lib::llvm::AD_Intel
};
let r = ia.asm.get().with_c_str(|a| {
constraints.as_slice().with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1 {
Store(bcx, r, *outputs.get(0));
} else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(target_arch = "arm")]
#[cfg(target_arch = "mips")]
#[cfg(target_arch = "mipsel")]
fn
|
() -> String {
"".to_string()
}
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
get_clobbers
|
identifier_name
|
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Translation of inline assembly.
*/
use lib;
use middle::trans::build::*;
use middle::trans::callee;
use middle::trans::common::*;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::expr;
use middle::trans::type_of;
use middle::trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'a>(bcx: &'a Block<'a>, ia: &ast::InlineAsm)
-> &'a Block<'a> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
// Prepare the output operands
let outputs = ia.outputs.iter().map(|&(ref c, ref out)| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
out_datum.val
}).collect::<Vec<_>>();
// Now the input operands
let inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints =
String::from_str(constraints.iter()
.map(|s| s.get().to_string())
.collect::<Vec<String>>()
.connect(",")
.as_slice());
let mut clobbers = get_clobbers();
    if !ia.clobbers.get().is_empty() && !clobbers.is_empty() {
clobbers = format!("{},{}", ia.clobbers.get(), clobbers);
} else {
clobbers.push_str(ia.clobbers.get());
}
// Add the clobbers to our constraints list
    if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push_char(',');
constraints.push_str(clobbers.as_slice());
} else {
constraints.push_str(clobbers.as_slice());
}
debug!("Asm Constraints: {:?}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
*output_types.get(0)
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
let dialect = match ia.dialect {
ast::AsmAtt => lib::llvm::AD_ATT,
ast::AsmIntel => lib::llvm::AD_Intel
};
let r = ia.asm.get().with_c_str(|a| {
constraints.as_slice().with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1 {
Store(bcx, r, *outputs.get(0));
} else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(target_arch = "arm")]
#[cfg(target_arch = "mips")]
#[cfg(target_arch = "mipsel")]
fn get_clobbers() -> String
|
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
{
"".to_string()
}
|
identifier_body
|
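Note: the asm.rs records above build the final LLVM constraint string by comma-joining the operand constraints and then appending the per-arch clobber list. A minimal standalone sketch of that joining step, rewritten against current Rust string APIs; the function name and the example constraints are illustrative and not part of the original source.

fn join_constraints(constraints: &[String], clobbers: &str) -> String {
    // Comma-join the operand constraints first ("=r", "r", ...).
    let mut joined = constraints.join(",");
    // Append the clobber list, adding the separating comma only when both
    // sides are non-empty, mirroring the push_char(',')/push_str branches
    // in trans_inline_asm.
    if !clobbers.is_empty() && !joined.is_empty() {
        joined.push(',');
    }
    joined.push_str(clobbers);
    joined
}

fn main() {
    let ops = vec!["=r".to_string(), "r".to_string()];
    let all = join_constraints(&ops, "~{dirflag},~{fpsr},~{flags}");
    assert_eq!(all, "=r,r,~{dirflag},~{fpsr},~{flags}");
}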
ethernet.rs
|
use common::slice::GetSlice;
use collections::slice;
use collections::vec::Vec;
use core::mem;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct EthernetIIHeader {
pub dst: MacAddr,
pub src: MacAddr,
pub ethertype: n16,
}
pub struct EthernetII {
pub header: EthernetIIHeader,
pub data: Vec<u8>,
}
impl FromBytes for EthernetII {
fn from_bytes(bytes: Vec<u8>) -> Option<Self> {
if bytes.len() >= mem::size_of::<EthernetIIHeader>()
|
None
}
}
impl ToBytes for EthernetII {
fn to_bytes(&self) -> Vec<u8> {
unsafe {
let header_ptr: *const EthernetIIHeader = &self.header;
let mut ret = Vec::from(slice::from_raw_parts(header_ptr as *const u8,
mem::size_of::<EthernetIIHeader>()));
ret.extend_from_slice(&self.data);
ret
}
}
}
|
{
unsafe {
return Some(EthernetII {
header: *(bytes.as_ptr() as *const EthernetIIHeader),
data: bytes.get_slice(mem::size_of::<EthernetIIHeader>() ..).to_vec(),
});
}
}
|
conditional_block
|
ethernet.rs
|
use common::slice::GetSlice;
use collections::slice;
use collections::vec::Vec;
use core::mem;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct EthernetIIHeader {
pub dst: MacAddr,
pub src: MacAddr,
pub ethertype: n16,
}
pub struct EthernetII {
pub header: EthernetIIHeader,
pub data: Vec<u8>,
}
impl FromBytes for EthernetII {
fn from_bytes(bytes: Vec<u8>) -> Option<Self>
|
}
impl ToBytes for EthernetII {
fn to_bytes(&self) -> Vec<u8> {
unsafe {
let header_ptr: *const EthernetIIHeader = &self.header;
let mut ret = Vec::from(slice::from_raw_parts(header_ptr as *const u8,
mem::size_of::<EthernetIIHeader>()));
ret.extend_from_slice(&self.data);
ret
}
}
}
|
{
if bytes.len() >= mem::size_of::<EthernetIIHeader>() {
unsafe {
return Some(EthernetII {
header: *(bytes.as_ptr() as *const EthernetIIHeader),
data: bytes.get_slice(mem::size_of::<EthernetIIHeader>() ..).to_vec(),
});
}
}
None
}
|
identifier_body
|
ethernet.rs
|
use common::slice::GetSlice;
use collections::slice;
use collections::vec::Vec;
use core::mem;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct
|
{
pub dst: MacAddr,
pub src: MacAddr,
pub ethertype: n16,
}
pub struct EthernetII {
pub header: EthernetIIHeader,
pub data: Vec<u8>,
}
impl FromBytes for EthernetII {
fn from_bytes(bytes: Vec<u8>) -> Option<Self> {
if bytes.len() >= mem::size_of::<EthernetIIHeader>() {
unsafe {
return Some(EthernetII {
header: *(bytes.as_ptr() as *const EthernetIIHeader),
data: bytes.get_slice(mem::size_of::<EthernetIIHeader>()..).to_vec(),
});
}
}
None
}
}
impl ToBytes for EthernetII {
fn to_bytes(&self) -> Vec<u8> {
unsafe {
let header_ptr: *const EthernetIIHeader = &self.header;
let mut ret = Vec::from(slice::from_raw_parts(header_ptr as *const u8,
mem::size_of::<EthernetIIHeader>()));
ret.extend_from_slice(&self.data);
ret
}
}
}
|
EthernetIIHeader
|
identifier_name
|
ethernet.rs
|
use common::slice::GetSlice;
use collections::slice;
use collections::vec::Vec;
use core::mem;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct EthernetIIHeader {
pub dst: MacAddr,
pub src: MacAddr,
pub ethertype: n16,
}
pub struct EthernetII {
pub header: EthernetIIHeader,
pub data: Vec<u8>,
}
impl FromBytes for EthernetII {
fn from_bytes(bytes: Vec<u8>) -> Option<Self> {
if bytes.len() >= mem::size_of::<EthernetIIHeader>() {
unsafe {
return Some(EthernetII {
header: *(bytes.as_ptr() as *const EthernetIIHeader),
data: bytes.get_slice(mem::size_of::<EthernetIIHeader>()..).to_vec(),
});
|
None
}
}
impl ToBytes for EthernetII {
fn to_bytes(&self) -> Vec<u8> {
unsafe {
let header_ptr: *const EthernetIIHeader = &self.header;
let mut ret = Vec::from(slice::from_raw_parts(header_ptr as *const u8,
mem::size_of::<EthernetIIHeader>()));
ret.extend_from_slice(&self.data);
ret
}
}
}
|
}
}
|
random_line_split
|
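Note: the ethernet.rs records parse the 14-byte Ethernet II header by casting the buffer to a packed struct. Below is a safe, dependency-free sketch of the same parse using slicing and u16::from_be_bytes; the struct and helper names are illustrative, and the EtherType comes back in host order rather than as the n16 wrapper used above.

#[derive(Debug)]
struct EthHeader {
    dst: [u8; 6],
    src: [u8; 6],
    ethertype: u16, // converted from the on-wire big-endian field
}

fn parse_eth_header(bytes: &[u8]) -> Option<EthHeader> {
    // Same length guard as from_bytes: the header is 6 + 6 + 2 bytes.
    if bytes.len() < 14 {
        return None;
    }
    let mut dst = [0u8; 6];
    let mut src = [0u8; 6];
    dst.copy_from_slice(&bytes[0..6]);
    src.copy_from_slice(&bytes[6..12]);
    let ethertype = u16::from_be_bytes([bytes[12], bytes[13]]);
    Some(EthHeader { dst, src, ethertype })
}

fn main() {
    let mut frame = vec![0xffu8; 6]; // broadcast destination
    frame.extend_from_slice(&[0x02, 0x00, 0x00, 0x00, 0x00, 0x01]); // source MAC
    frame.extend_from_slice(&[0x08, 0x00]); // EtherType 0x0800 (IPv4)
    frame.extend_from_slice(b"payload");
    let hdr = parse_eth_header(&frame).unwrap();
    assert_eq!(hdr.ethertype, 0x0800);
}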
main.rs
|
//#![deny(warnings)]
// please excuse my poor rust, this is my first time
extern crate toml;
extern crate rustc_serialize;
use std::fs::File;
use std::env;
use std::io;
use std::io::prelude::*;
use toml::Value;
use rustc_serialize::json::Json;
extern crate hyper;
extern crate env_logger;
use hyper::Client;
use hyper::header::Connection;
use std::net::{Ipv4Addr, UdpSocket};
//use rustc_serialize::hex::FromHex;
extern crate sha1;
extern crate msgpack;
fn main() {
let game_name = "Psilly";
let game_version = "0.0.1";
println!("Starting {} Server v.{}...\n", game_name, game_version);
// config file test
let mut args = env::args();
let mut input = String::new();
let filename = if args.len() > 1 {
let name = args.nth(1).unwrap();
File::open(&name).and_then(|mut f| {
f.read_to_string(&mut input)
}).unwrap();
name
} else {
/*
io::stdin().read_to_string(&mut input).unwrap();
"<stdin>".to_string()
*/
println!("Location of config file psillyd.toml must be specified as first argument");
return;
};
let mut parser = toml::Parser::new(&input);
let toml = match parser.parse() {
Some(toml) => toml,
None => {
for err in &parser.errors {
let (loline, locol) = parser.to_linecol(err.lo);
let (hiline, hicol) = parser.to_linecol(err.hi);
println!("{}:{}:{}-{}:{} error: {}",
filename, loline, locol, hiline, hicol, err.desc);
}
return
}
};
|
// http test
//env_logger::init().unwrap();
let url = "http://gsl.pow7.com/announce/".to_string();
let url = url + "?game_name=" + game_name;
let url = url + "&game_version=" + game_version;
//let table = Value::Table(parser.parse());
//let server_name = table.lookup("server.name");
//println!("Name:\n{}", server_name );
let game_mode = "Normal";
let server_port = 42002;
let server_name = "SERVERNAME";
let server_password = "PASSWORD";
let max_players = 512;
let url = url + "&game_mode=" + game_mode;
let url = url + "&port=" + &server_port.to_string();
let url = url + "&name=" + server_name;
let url = url + "&password=" + server_password;
let url = url + "&max_players=" + &max_players.to_string();
println!("Request URL:\n{}", url);
let client = Client::new();
let mut announce_result = client.get(&*url)
.header(Connection::close())
.send().unwrap();
println!("Response: {}", announce_result.status);
//println!("Headers:\n{}", announce_result.headers);
io::copy(&mut announce_result, &mut io::stdout()).unwrap();
// udp ping/pong test
// listen socket
let udp_socket = UdpSocket::bind((Ipv4Addr::new(0, 0, 0, 0), server_port)).unwrap();
// pong socket
let pong_ip = Ipv4Addr::new(69, 172, 205, 90);
let pong_port = 42001;
// pong data
//let mut pong_data = "pong".to_string();
let pong_data = "pong1234512345123451234512345123".to_string();
// test data
//let server_log_id = 262;
//let nonce = "24c148a156046268f0259fde5e37640b8041786d".from_hex();
//let session = "c891a5a5679a10a8fcdb38d959e048aa05c831fb".from_hex();
let mut m = sha1::Sha1::new();
m.update("test".as_bytes());
//m.update(nonce);
//m.update(session);
//let sha1_hash = m.digest();
println!("\nsha1_hash: {}", m.hexdigest());
//let player_count = 42;
// server log id (4)
//pong_data = pong_data + "0262".from_hex();
// sha1 hash (20)
//pong_data = pong_data + sha1_hash;
// player count (2)
//pong_data = pong_data + &player_count.to_string().from_hex();
// send pong
udp_socket.send_to(pong_data.as_bytes(), (pong_ip, pong_port)).unwrap();
// Send a reply to the socket we received data from
//let buf = &mut buf[..amt];
//buf.reverse();
//try!(udp_socket.send_to(buf, &src));
//drop(udp_socket); // close the socket
//let demo_msgpack = msgpack::Encoder::to_msgpack(&as_bytes).ok().unwrap();
//println!("Encoded: {}", wtf.to_string());
// processing incoming udp packets
let mut buf = [0; 48];
println!("Waiting for UDP packets...");
loop {
let result = udp_socket.recv_from(&mut buf);
println!("Got: {:?}", result);
//println!("buf.len(): {:?}", buf.len());
//let pong_data = "pong1234512345123451234512345123".to_string(); // pong + server_log_id(4) + sha1(20) + player_count(2)
udp_socket.send_to(pong_data.as_bytes(), (pong_ip, pong_port)).unwrap();
}
//drop(udp_socket);
}
// used by config file test
fn convert(toml: Value) -> Json {
match toml {
Value::String(s) => Json::String(s),
Value::Integer(i) => Json::I64(i),
Value::Float(f) => Json::F64(f),
Value::Boolean(b) => Json::Boolean(b),
Value::Array(arr) => Json::Array(arr.into_iter().map(convert).collect()),
Value::Table(table) => Json::Object(table.into_iter().map(|(k, v)| {
(k, convert(v))
}).collect()),
Value::Datetime(dt) => Json::String(dt),
}
}
|
let json = convert(Value::Table(toml));
println!("{}", json.pretty());
|
random_line_split
|
main.rs
|
//#![deny(warnings)]
// please excuse my poor rust, this is my first time
extern crate toml;
extern crate rustc_serialize;
use std::fs::File;
use std::env;
use std::io;
use std::io::prelude::*;
use toml::Value;
use rustc_serialize::json::Json;
extern crate hyper;
extern crate env_logger;
use hyper::Client;
use hyper::header::Connection;
use std::net::{Ipv4Addr, UdpSocket};
//use rustc_serialize::hex::FromHex;
extern crate sha1;
extern crate msgpack;
fn main() {
let game_name = "Psilly";
let game_version = "0.0.1";
println!("Starting {} Server v.{}...\n", game_name, game_version);
// config file test
let mut args = env::args();
let mut input = String::new();
let filename = if args.len() > 1 {
let name = args.nth(1).unwrap();
File::open(&name).and_then(|mut f| {
f.read_to_string(&mut input)
}).unwrap();
name
} else {
/*
io::stdin().read_to_string(&mut input).unwrap();
"<stdin>".to_string()
*/
println!("Location of config file psillyd.toml must be specified as first argument");
return;
};
let mut parser = toml::Parser::new(&input);
let toml = match parser.parse() {
Some(toml) => toml,
None => {
for err in &parser.errors {
let (loline, locol) = parser.to_linecol(err.lo);
let (hiline, hicol) = parser.to_linecol(err.hi);
println!("{}:{}:{}-{}:{} error: {}",
filename, loline, locol, hiline, hicol, err.desc);
}
return
}
};
let json = convert(Value::Table(toml));
println!("{}", json.pretty());
// http test
//env_logger::init().unwrap();
let url = "http://gsl.pow7.com/announce/".to_string();
let url = url + "?game_name=" + game_name;
let url = url + "&game_version=" + game_version;
//let table = Value::Table(parser.parse());
//let server_name = table.lookup("server.name");
//println!("Name:\n{}", server_name );
let game_mode = "Normal";
let server_port = 42002;
let server_name = "SERVERNAME";
let server_password = "PASSWORD";
let max_players = 512;
let url = url + "&game_mode=" + game_mode;
let url = url + "&port=" + &server_port.to_string();
let url = url + "&name=" + server_name;
let url = url + "&password=" + server_password;
let url = url + "&max_players=" + &max_players.to_string();
println!("Request URL:\n{}", url);
let client = Client::new();
let mut announce_result = client.get(&*url)
.header(Connection::close())
.send().unwrap();
println!("Response: {}", announce_result.status);
//println!("Headers:\n{}", announce_result.headers);
io::copy(&mut announce_result, &mut io::stdout()).unwrap();
// udp ping/pong test
// listen socket
let udp_socket = UdpSocket::bind((Ipv4Addr::new(0, 0, 0, 0), server_port)).unwrap();
// pong socket
let pong_ip = Ipv4Addr::new(69, 172, 205, 90);
let pong_port = 42001;
// pong data
//let mut pong_data = "pong".to_string();
let pong_data = "pong1234512345123451234512345123".to_string();
// test data
//let server_log_id = 262;
//let nonce = "24c148a156046268f0259fde5e37640b8041786d".from_hex();
//let session = "c891a5a5679a10a8fcdb38d959e048aa05c831fb".from_hex();
let mut m = sha1::Sha1::new();
m.update("test".as_bytes());
//m.update(nonce);
//m.update(session);
//let sha1_hash = m.digest();
println!("\nsha1_hash: {}", m.hexdigest());
//let player_count = 42;
// server log id (4)
//pong_data = pong_data + "0262".from_hex();
// sha1 hash (20)
//pong_data = pong_data + sha1_hash;
// player count (2)
//pong_data = pong_data + &player_count.to_string().from_hex();
// send pong
udp_socket.send_to(pong_data.as_bytes(), (pong_ip, pong_port)).unwrap();
// Send a reply to the socket we received data from
//let buf = &mut buf[..amt];
//buf.reverse();
//try!(udp_socket.send_to(buf, &src));
//drop(udp_socket); // close the socket
//let demo_msgpack = msgpack::Encoder::to_msgpack(&as_bytes).ok().unwrap();
//println!("Encoded: {}", wtf.to_string());
// processing incoming udp packets
let mut buf = [0; 48];
println!("Waiting for UDP packets...");
loop {
let result = udp_socket.recv_from(&mut buf);
println!("Got: {:?}", result);
//println!("buf.len(): {:?}", buf.len());
//let pong_data = "pong1234512345123451234512345123".to_string(); // pong + server_log_id(4) + sha1(20) + player_count(2)
udp_socket.send_to(pong_data.as_bytes(), (pong_ip, pong_port)).unwrap();
}
//drop(udp_socket);
}
// used by config file test
fn
|
(toml: Value) -> Json {
match toml {
Value::String(s) => Json::String(s),
Value::Integer(i) => Json::I64(i),
Value::Float(f) => Json::F64(f),
Value::Boolean(b) => Json::Boolean(b),
Value::Array(arr) => Json::Array(arr.into_iter().map(convert).collect()),
Value::Table(table) => Json::Object(table.into_iter().map(|(k, v)| {
(k, convert(v))
}).collect()),
Value::Datetime(dt) => Json::String(dt),
}
}
|
convert
|
identifier_name
|
main.rs
|
//#![deny(warnings)]
// please excuse my poor rust, this is my first time
extern crate toml;
extern crate rustc_serialize;
use std::fs::File;
use std::env;
use std::io;
use std::io::prelude::*;
use toml::Value;
use rustc_serialize::json::Json;
extern crate hyper;
extern crate env_logger;
use hyper::Client;
use hyper::header::Connection;
use std::net::{Ipv4Addr, UdpSocket};
//use rustc_serialize::hex::FromHex;
extern crate sha1;
extern crate msgpack;
fn main() {
let game_name = "Psilly";
let game_version = "0.0.1";
println!("Starting {} Server v.{}...\n", game_name, game_version);
// config file test
let mut args = env::args();
let mut input = String::new();
let filename = if args.len() > 1 {
let name = args.nth(1).unwrap();
File::open(&name).and_then(|mut f| {
f.read_to_string(&mut input)
}).unwrap();
name
} else {
/*
io::stdin().read_to_string(&mut input).unwrap();
"<stdin>".to_string()
*/
println!("Location of config file psillyd.toml must be specified as first argument");
return;
};
let mut parser = toml::Parser::new(&input);
let toml = match parser.parse() {
Some(toml) => toml,
None => {
for err in &parser.errors {
let (loline, locol) = parser.to_linecol(err.lo);
let (hiline, hicol) = parser.to_linecol(err.hi);
println!("{}:{}:{}-{}:{} error: {}",
filename, loline, locol, hiline, hicol, err.desc);
}
return
}
};
let json = convert(Value::Table(toml));
println!("{}", json.pretty());
// http test
//env_logger::init().unwrap();
let url = "http://gsl.pow7.com/announce/".to_string();
let url = url + "?game_name=" + game_name;
let url = url + "&game_version=" + game_version;
//let table = Value::Table(parser.parse());
//let server_name = table.lookup("server.name");
//println!("Name:\n{}", server_name );
let game_mode = "Normal";
let server_port = 42002;
let server_name = "SERVERNAME";
let server_password = "PASSWORD";
let max_players = 512;
let url = url + "&game_mode=" + game_mode;
let url = url + "&port=" + &server_port.to_string();
let url = url + "&name=" + server_name;
let url = url + "&password=" + server_password;
let url = url + "&max_players=" + &max_players.to_string();
println!("Request URL:\n{}", url);
let client = Client::new();
let mut announce_result = client.get(&*url)
.header(Connection::close())
.send().unwrap();
println!("Response: {}", announce_result.status);
//println!("Headers:\n{}", announce_result.headers);
io::copy(&mut announce_result, &mut io::stdout()).unwrap();
// udp ping/pong test
// listen socket
let udp_socket = UdpSocket::bind((Ipv4Addr::new(0, 0, 0, 0), server_port)).unwrap();
// pong socket
let pong_ip = Ipv4Addr::new(69, 172, 205, 90);
let pong_port = 42001;
// pong data
//let mut pong_data = "pong".to_string();
let pong_data = "pong1234512345123451234512345123".to_string();
// test data
//let server_log_id = 262;
//let nonce = "24c148a156046268f0259fde5e37640b8041786d".from_hex();
//let session = "c891a5a5679a10a8fcdb38d959e048aa05c831fb".from_hex();
let mut m = sha1::Sha1::new();
m.update("test".as_bytes());
//m.update(nonce);
//m.update(session);
//let sha1_hash = m.digest();
println!("\nsha1_hash: {}", m.hexdigest());
//let player_count = 42;
// server log id (4)
//pong_data = pong_data + "0262".from_hex();
// sha1 hash (20)
//pong_data = pong_data + sha1_hash;
// player count (2)
//pong_data = pong_data + &player_count.to_string().from_hex();
// send pong
udp_socket.send_to(pong_data.as_bytes(), (pong_ip, pong_port)).unwrap();
// Send a reply to the socket we received data from
//let buf = &mut buf[..amt];
//buf.reverse();
//try!(udp_socket.send_to(buf, &src));
//drop(udp_socket); // close the socket
//let demo_msgpack = msgpack::Encoder::to_msgpack(&as_bytes).ok().unwrap();
//println!("Encoded: {}", wtf.to_string());
// processing incoming udp packets
let mut buf = [0; 48];
println!("Waiting for UDP packets...");
loop {
let result = udp_socket.recv_from(&mut buf);
println!("Got: {:?}", result);
//println!("buf.len(): {:?}", buf.len());
//let pong_data = "pong1234512345123451234512345123".to_string(); // pong + server_log_id(4) + sha1(20) + player_count(2)
udp_socket.send_to(pong_data.as_bytes(), (pong_ip, pong_port)).unwrap();
}
//drop(udp_socket);
}
// used by config file test
fn convert(toml: Value) -> Json
|
{
match toml {
Value::String(s) => Json::String(s),
Value::Integer(i) => Json::I64(i),
Value::Float(f) => Json::F64(f),
Value::Boolean(b) => Json::Boolean(b),
Value::Array(arr) => Json::Array(arr.into_iter().map(convert).collect()),
Value::Table(table) => Json::Object(table.into_iter().map(|(k, v)| {
(k, convert(v))
}).collect()),
Value::Datetime(dt) => Json::String(dt),
}
}
|
identifier_body
|
|
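Note: the main.rs record ends with a UDP ping/pong loop — bind a socket, wait for a datagram, answer with a fixed 32-byte pong payload. A std-only sketch of that loop follows, with one deliberate change: it replies to the sender instead of the hard-coded announce host. The address and payload layout are placeholders, not the real protocol.

use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Listen on the same port the server record uses for incoming pings.
    let socket = UdpSocket::bind("0.0.0.0:42002")?;
    // "pong" tag followed by 28 bytes of placeholder payload (32 bytes total).
    let pong = b"pong1234512345123451234512345123";
    let mut buf = [0u8; 48];
    loop {
        // Block until a datagram arrives, then answer the peer that sent it.
        let (len, peer) = socket.recv_from(&mut buf)?;
        println!("got {} bytes from {}", len, peer);
        socket.send_to(pong, peer)?;
    }
}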
twitter_client.rs
|
use std::io::Read;
use oauthcli::SignatureMethod::HmacSha1;
use oauthcli::OAuthAuthorizationHeaderBuilder;
use serde_json;
use serde_json::Value;
use reqwest::{Client, Url, StatusCode, Response};
use reqwest::header::{HeaderValue, AUTHORIZATION};
const API_URL: &'static str = "https://api.twitter.com/1.1/statuses/update.json";
pub struct TwitterClient {
consumer_key: String,
consumer_secret: String,
access_key: String,
access_secret: String,
client: Client,
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum TwitterError {
Duplicated,
RateLimitExceeded,
InvalidTweet,
InvalidResponse,
Network,
Unauthorized,
Unknown(String),
}
impl TwitterClient {
pub fn new(consumer_key: String, consumer_secret: String, access_key: String, access_secret: String) -> TwitterClient
{
TwitterClient {
consumer_key: consumer_key,
consumer_secret: consumer_secret,
access_key: access_key,
access_secret: access_secret,
client: Client::new(),
}
}
pub fn is_valid(&self) -> bool
{
// TODO: implement token validation
true
}
fn post(&self, url: &str, args: Vec<(&str, &str)>) -> Result<Response, ()>
{
let mut req = self.client.post(url);
req = req.form(&args);
let oauth_header = OAuthAuthorizationHeaderBuilder::new(
"POST",
&Url::parse(url).expect("must be a valid url string"),
&self.consumer_key,
&self.consumer_secret,
HmacSha1)
.token(&self.access_key, &self.access_secret)
.request_parameters(args.into_iter())
.finish_for_twitter()
.to_string();
req.header(AUTHORIZATION, &oauth_header).send().map_err(|_| ())
}
pub fn update_status(&self, message: &str, in_reply_to: Option<u64>)
-> Result<u64, TwitterError>
|
match id {
Some(186) => Err(TwitterError::InvalidTweet),
Some(187) => Err(TwitterError::Duplicated),
_ => Err(TwitterError::Unknown(body))
}
},
StatusCode::OK => {
let mut body = String::new();
response.read_to_string(&mut body).map_err(|_| TwitterError::Network)?;
let json: Value =
serde_json::from_str(&body).map_err(|_| TwitterError::InvalidResponse)?;
let id = json["id"].as_u64().ok_or(TwitterError::InvalidResponse)?;
Ok(id)
},
StatusCode::TOO_MANY_REQUESTS => Err(TwitterError::RateLimitExceeded),
StatusCode::UNAUTHORIZED => Err(TwitterError::Unauthorized),
_ => Err(TwitterError::Unknown(format!("unknown status: {}", response.status())))
}
}
}
|
{
let prev = in_reply_to.map(|i| i.to_string());
let mut args = vec![("status", message)];
if let Some(prev) = prev.as_ref() {
args.push(("in_reply_to_status_id", prev));
}
let mut response = self.post(API_URL, args).map_err(|_| TwitterError::Network)?;
match response.status() {
StatusCode::FORBIDDEN => {
let mut body = String::new();
response.read_to_string(&mut body).map_err(|_| TwitterError::Network)?;
let json: Option<Value> = serde_json::from_str(&body).ok();
let id = json.and_then(|json| json["errors"][0]["code"].as_i64());
|
identifier_body
|
twitter_client.rs
|
use std::io::Read;
use oauthcli::SignatureMethod::HmacSha1;
use oauthcli::OAuthAuthorizationHeaderBuilder;
use serde_json;
use serde_json::Value;
use reqwest::{Client, Url, StatusCode, Response};
use reqwest::header::{HeaderValue, AUTHORIZATION};
const API_URL: &'static str = "https://api.twitter.com/1.1/statuses/update.json";
pub struct TwitterClient {
consumer_key: String,
consumer_secret: String,
access_key: String,
access_secret: String,
client: Client,
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum TwitterError {
Duplicated,
RateLimitExceeded,
InvalidTweet,
InvalidResponse,
Network,
Unauthorized,
Unknown(String),
}
impl TwitterClient {
pub fn new(consumer_key: String, consumer_secret: String, access_key: String, access_secret: String) -> TwitterClient
{
TwitterClient {
consumer_key: consumer_key,
consumer_secret: consumer_secret,
access_key: access_key,
access_secret: access_secret,
client: Client::new(),
}
}
pub fn is_valid(&self) -> bool
{
// TODO: implement token validation
true
}
fn post(&self, url: &str, args: Vec<(&str, &str)>) -> Result<Response, ()>
{
let mut req = self.client.post(url);
req = req.form(&args);
let oauth_header = OAuthAuthorizationHeaderBuilder::new(
|
&self.consumer_secret,
HmacSha1)
.token(&self.access_key, &self.access_secret)
.request_parameters(args.into_iter())
.finish_for_twitter()
.to_string();
req.header(AUTHORIZATION, &oauth_header).send().map_err(|_| ())
}
pub fn update_status(&self, message: &str, in_reply_to: Option<u64>)
-> Result<u64, TwitterError>
{
let prev = in_reply_to.map(|i| i.to_string());
let mut args = vec![("status", message)];
if let Some(prev) = prev.as_ref() {
args.push(("in_reply_to_status_id", prev));
}
let mut response = self.post(API_URL, args).map_err(|_| TwitterError::Network)?;
match response.status() {
StatusCode::FORBIDDEN => {
let mut body = String::new();
response.read_to_string(&mut body).map_err(|_| TwitterError::Network)?;
let json: Option<Value> = serde_json::from_str(&body).ok();
let id = json.and_then(|json| json["errors"][0]["code"].as_i64());
match id {
Some(186) => Err(TwitterError::InvalidTweet),
Some(187) => Err(TwitterError::Duplicated),
_ => Err(TwitterError::Unknown(body))
}
},
StatusCode::OK => {
let mut body = String::new();
response.read_to_string(&mut body).map_err(|_| TwitterError::Network)?;
let json: Value =
serde_json::from_str(&body).map_err(|_| TwitterError::InvalidResponse)?;
let id = json["id"].as_u64().ok_or(TwitterError::InvalidResponse)?;
Ok(id)
},
StatusCode::TOO_MANY_REQUESTS => Err(TwitterError::RateLimitExceeded),
StatusCode::UNAUTHORIZED => Err(TwitterError::Unauthorized),
_ => Err(TwitterError::Unknown(format!("unknown status: {}", response.status())))
}
}
}
|
"POST",
&Url::parse(url).expect("must be a valid url string"),
&self.consumer_key,
|
random_line_split
|
twitter_client.rs
|
use std::io::Read;
use oauthcli::SignatureMethod::HmacSha1;
use oauthcli::OAuthAuthorizationHeaderBuilder;
use serde_json;
use serde_json::Value;
use reqwest::{Client, Url, StatusCode, Response};
use reqwest::header::{HeaderValue, AUTHORIZATION};
const API_URL: &'static str = "https://api.twitter.com/1.1/statuses/update.json";
pub struct TwitterClient {
consumer_key: String,
consumer_secret: String,
access_key: String,
access_secret: String,
client: Client,
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum TwitterError {
Duplicated,
RateLimitExceeded,
InvalidTweet,
InvalidResponse,
Network,
Unauthorized,
Unknown(String),
}
impl TwitterClient {
pub fn
|
(consumer_key: String, consumer_secret: String, access_key: String, access_secret: String) -> TwitterClient
{
TwitterClient {
consumer_key: consumer_key,
consumer_secret: consumer_secret,
access_key: access_key,
access_secret: access_secret,
client: Client::new(),
}
}
pub fn is_valid(&self) -> bool
{
// TODO: implement token validation
true
}
fn post(&self, url: &str, args: Vec<(&str, &str)>) -> Result<Response, ()>
{
let mut req = self.client.post(url);
req = req.form(&args);
let oauth_header = OAuthAuthorizationHeaderBuilder::new(
"POST",
&Url::parse(url).expect("must be a valid url string"),
&self.consumer_key,
&self.consumer_secret,
HmacSha1)
.token(&self.access_key, &self.access_secret)
.request_parameters(args.into_iter())
.finish_for_twitter()
.to_string();
req.header(AUTHORIZATION, &oauth_header).send().map_err(|_| ())
}
pub fn update_status(&self, message: &str, in_reply_to: Option<u64>)
-> Result<u64, TwitterError>
{
let prev = in_reply_to.map(|i| i.to_string());
let mut args = vec![("status", message)];
if let Some(prev) = prev.as_ref() {
args.push(("in_reply_to_status_id", prev));
}
let mut response = self.post(API_URL, args).map_err(|_| TwitterError::Network)?;
match response.status() {
StatusCode::FORBIDDEN => {
let mut body = String::new();
response.read_to_string(&mut body).map_err(|_| TwitterError::Network)?;
let json: Option<Value> = serde_json::from_str(&body).ok();
let id = json.and_then(|json| json["errors"][0]["code"].as_i64());
match id {
Some(186) => Err(TwitterError::InvalidTweet),
Some(187) => Err(TwitterError::Duplicated),
_ => Err(TwitterError::Unknown(body))
}
},
StatusCode::OK => {
let mut body = String::new();
response.read_to_string(&mut body).map_err(|_| TwitterError::Network)?;
let json: Value =
serde_json::from_str(&body).map_err(|_| TwitterError::InvalidResponse)?;
let id = json["id"].as_u64().ok_or(TwitterError::InvalidResponse)?;
Ok(id)
},
StatusCode::TOO_MANY_REQUESTS => Err(TwitterError::RateLimitExceeded),
StatusCode::UNAUTHORIZED => Err(TwitterError::Unauthorized),
_ => Err(TwitterError::Unknown(format!("unknown status: {}", response.status())))
}
}
}
|
new
|
identifier_name
|
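Note: update_status in the twitter_client.rs records maps HTTP status codes — and the numeric error codes 186/187 found inside a 403 body — onto TwitterError variants. A network-free sketch of just that classification step; the enum and function here are illustrative stand-ins, not the crate's actual API.

#[derive(Debug, PartialEq)]
enum TweetError {
    Duplicated,
    RateLimitExceeded,
    InvalidTweet,
    Unauthorized,
    Unknown(String),
}

fn classify(status: u16, api_code: Option<i64>, body: &str) -> Result<(), TweetError> {
    match status {
        200 => Ok(()),
        403 => match api_code {
            Some(186) => Err(TweetError::InvalidTweet), // matches the 186 arm above
            Some(187) => Err(TweetError::Duplicated),   // matches the 187 arm above
            _ => Err(TweetError::Unknown(body.to_string())),
        },
        429 => Err(TweetError::RateLimitExceeded),
        401 => Err(TweetError::Unauthorized),
        other => Err(TweetError::Unknown(format!("unknown status: {}", other))),
    }
}

fn main() {
    assert_eq!(classify(403, Some(187), "{}"), Err(TweetError::Duplicated));
    assert_eq!(classify(429, None, ""), Err(TweetError::RateLimitExceeded));
    assert!(classify(200, None, "").is_ok());
}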
common.rs
|
//! Contains several types used throughout the library.
use std::fmt;
use std::error;
use name::OwnedName as Name;
/// Represents a thing which has a position inside some textual document.
///
/// This trait is implemented by parsers, lexers and errors. It is used primarily to create
/// error objects.
pub trait HasPosition {
/// Returns a line number inside the document.
fn row(&self) -> usize;
/// Returns a column number inside the document.
fn col(&self) -> usize;
}
/// XML parsing error.
///
/// Consists of a row and column reference and a message.
#[derive(Clone, PartialEq, Eq)]
pub struct Error {
row: usize,
col: usize,
msg: String
}
impl fmt::Show for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}:{:?}: {:?}", self.row + 1, self.col + 1, self.msg)
}
}
impl HasPosition for Error {
#[inline]
fn row(&self) -> usize { self.row }
#[inline]
fn col(&self) -> usize { self.col }
}
impl Error {
/// Creates a new error using position information from the provided
/// `HasPosition` object and a message.
#[inline]
pub fn new<O: HasPosition>(o: &O, msg: String) -> Error {
Error { row: o.row(), col: o.col(), msg: msg }
}
/// Creates a new error using provided position information and a message.
#[inline]
pub fn new_full(row: usize, col: usize, msg: String) -> Error {
Error { row: row, col: col, msg: msg }
}
/// Returns a reference to a message which is contained inside this error.
#[inline]
pub fn msg<'a>(&'a self) -> &'a str { self.msg.as_slice() }
}
impl error::Error for Error {
#[inline]
fn description(&self) -> &str { &*self.msg }
fn detail(&self) -> Option<String> { Some(format!("{:?}", self)) }
}
/// XML version enumeration.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML version 1.0.
Version10,
/// XML version 1.1.
Version11
}
impl fmt::String for XmlVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
XmlVersion::Version10 => write!(f, "1.0"),
XmlVersion::Version11 => write!(f, "1.1")
}
}
}
/// Checks whether the given character is a white space character (`S`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_whitespace_char(c: char) -> bool {
match c {
'\x20' | '\x09' | '\x0d' | '\x0a' => true,
_ => false
}
}
/// Checks whether the given character is a name start character (`NameStartChar`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_name_start_char(c: char) -> bool
|
/// Checks whether the given character is a name character (`NameChar`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_name_char(c: char) -> bool {
match c {
_ if is_name_start_char(c) => true,
'-' | '.' | '0'...'9' | '\u{B7}' |
'\u{300}'...'\u{3F6}' | '\u{203F}'...'\u{2040}' => true,
_ => false
}
}
|
{
match c {
':' | 'A'...'Z' | '_' | 'a'...'z' |
'\u{C0}'...'\u{D6}' | '\u{D8}'...'\u{F6}' | '\u{F8}'...'\u{2FF}' |
'\u{370}'...'\u{37D}' | '\u{37F}'...'\u{1FFF}' |
'\u{200C}'...'\u{200D}' | '\u{2070}'...'\u{218F}' |
'\u{2C00}'...'\u{2FEF}' | '\u{3001}'...'\u{D7FF}' |
'\u{F900}'...'\u{FDCF}' | '\u{FDF0}'...'\u{FFFD}' |
'\u{10000}'...'\u{EFFFF}' => true,
_ => false
}
}
|
identifier_body
|
common.rs
|
//! Contains several types used throughout the library.
use std::fmt;
use std::error;
use name::OwnedName as Name;
/// Represents a thing which has a position inside some textual document.
///
/// This trait is implemented by parsers, lexers and errors. It is used primarily to create
/// error objects.
pub trait HasPosition {
/// Returns a line number inside the document.
fn row(&self) -> usize;
/// Returns a column number inside the document.
fn col(&self) -> usize;
}
/// XML parsing error.
///
/// Consists of a row and column reference and a message.
#[derive(Clone, PartialEq, Eq)]
pub struct Error {
row: usize,
col: usize,
msg: String
}
impl fmt::Show for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}:{:?}: {:?}", self.row + 1, self.col + 1, self.msg)
}
}
impl HasPosition for Error {
#[inline]
fn row(&self) -> usize { self.row }
#[inline]
fn col(&self) -> usize { self.col }
}
impl Error {
/// Creates a new error using position information from the provided
/// `HasPosition` object and a message.
#[inline]
pub fn
|
<O: HasPosition>(o: &O, msg: String) -> Error {
Error { row: o.row(), col: o.col(), msg: msg }
}
/// Creates a new error using provided position information and a message.
#[inline]
pub fn new_full(row: usize, col: usize, msg: String) -> Error {
Error { row: row, col: col, msg: msg }
}
/// Returns a reference to a message which is contained inside this error.
#[inline]
pub fn msg<'a>(&'a self) -> &'a str { self.msg.as_slice() }
}
impl error::Error for Error {
#[inline]
fn description(&self) -> &str { &*self.msg }
fn detail(&self) -> Option<String> { Some(format!("{:?}", self)) }
}
/// XML version enumeration.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML version 1.0.
Version10,
/// XML version 1.1.
Version11
}
impl fmt::String for XmlVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
XmlVersion::Version10 => write!(f, "1.0"),
XmlVersion::Version11 => write!(f, "1.1")
}
}
}
/// Checks whether the given character is a white space character (`S`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_whitespace_char(c: char) -> bool {
match c {
'\x20' | '\x09' | '\x0d' | '\x0a' => true,
_ => false
}
}
/// Checks whether the given character is a name start character (`NameStartChar`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_name_start_char(c: char) -> bool {
match c {
':' | 'A'...'Z' | '_' | 'a'...'z' |
'\u{C0}'...'\u{D6}' | '\u{D8}'...'\u{F6}' | '\u{F8}'...'\u{2FF}' |
'\u{370}'...'\u{37D}' | '\u{37F}'...'\u{1FFF}' |
'\u{200C}'...'\u{200D}' | '\u{2070}'...'\u{218F}' |
'\u{2C00}'...'\u{2FEF}' | '\u{3001}'...'\u{D7FF}' |
'\u{F900}'...'\u{FDCF}' | '\u{FDF0}'...'\u{FFFD}' |
'\u{10000}'...'\u{EFFFF}' => true,
_ => false
}
}
/// Checks whether the given character is a name character (`NameChar`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_name_char(c: char) -> bool {
match c {
_ if is_name_start_char(c) => true,
'-' | '.' | '0'...'9' | '\u{B7}' |
'\u{300}'...'\u{3F6}' | '\u{203F}'...'\u{2040}' => true,
_ => false
}
}
|
new
|
identifier_name
|
common.rs
|
//! Contains several types used throughout the library.
use std::fmt;
use std::error;
use name::OwnedName as Name;
/// Represents a thing which has a position inside some textual document.
///
/// This trait is implemented by parsers, lexers and errors. It is used primarily to create
/// error objects.
pub trait HasPosition {
/// Returns a line number inside the document.
fn row(&self) -> usize;
/// Returns a column number inside the document.
fn col(&self) -> usize;
}
/// XML parsing error.
///
/// Consists of a row and column reference and a message.
#[derive(Clone, PartialEq, Eq)]
pub struct Error {
row: usize,
col: usize,
msg: String
}
impl fmt::Show for Error {
|
}
}
impl HasPosition for Error {
#[inline]
fn row(&self) -> usize { self.row }
#[inline]
fn col(&self) -> usize { self.col }
}
impl Error {
/// Creates a new error using position information from the provided
/// `HasPosition` object and a message.
#[inline]
pub fn new<O: HasPosition>(o: &O, msg: String) -> Error {
Error { row: o.row(), col: o.col(), msg: msg }
}
/// Creates a new error using provided position information and a message.
#[inline]
pub fn new_full(row: usize, col: usize, msg: String) -> Error {
Error { row: row, col: col, msg: msg }
}
/// Returns a reference to a message which is contained inside this error.
#[inline]
pub fn msg<'a>(&'a self) -> &'a str { self.msg.as_slice() }
}
impl error::Error for Error {
#[inline]
fn description(&self) -> &str { &*self.msg }
fn detail(&self) -> Option<String> { Some(format!("{:?}", self)) }
}
/// XML version enumeration.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML version 1.0.
Version10,
/// XML version 1.1.
Version11
}
impl fmt::String for XmlVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
XmlVersion::Version10 => write!(f, "1.0"),
XmlVersion::Version11 => write!(f, "1.1")
}
}
}
/// Checks whether the given character is a white space character (`S`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_whitespace_char(c: char) -> bool {
match c {
'\x20' | '\x09' | '\x0d' | '\x0a' => true,
_ => false
}
}
/// Checks whether the given character is a name start character (`NameStartChar`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_name_start_char(c: char) -> bool {
match c {
':' | 'A'...'Z' | '_' | 'a'...'z' |
'\u{C0}'...'\u{D6}' | '\u{D8}'...'\u{F6}' | '\u{F8}'...'\u{2FF}' |
'\u{370}'...'\u{37D}' | '\u{37F}'...'\u{1FFF}' |
'\u{200C}'...'\u{200D}' | '\u{2070}'...'\u{218F}' |
'\u{2C00}'...'\u{2FEF}' | '\u{3001}'...'\u{D7FF}' |
'\u{F900}'...'\u{FDCF}' | '\u{FDF0}'...'\u{FFFD}' |
'\u{10000}'...'\u{EFFFF}' => true,
_ => false
}
}
/// Checks whether the given character is a name character (`NameChar`)
/// as is defined by XML 1.1 specification, [section 2.3][1].
///
/// [1]: http://www.w3.org/TR/2006/REC-xml11-20060816/#sec-common-syn
pub fn is_name_char(c: char) -> bool {
match c {
_ if is_name_start_char(c) => true,
'-' | '.' | '0'...'9' | '\u{B7}' |
'\u{300}'...'\u{3F6}' | '\u{203F}'...'\u{2040}' => true,
_ => false
}
}
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}:{:?}: {:?}", self.row + 1, self.col + 1, self.msg)
|
random_line_split
|
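Note: common.rs defines character-class predicates for XML whitespace and names. A small standalone rendition using the matches! macro, plus a whole-name validator built on the same start-char/char split. The predicates here are narrowed to the ASCII subset for brevity, so this is a sketch rather than the full Unicode ranges shown above.

// XML 1.1 "S" production: space, tab, carriage return, line feed.
fn is_whitespace_char(c: char) -> bool {
    matches!(c, '\u{20}' | '\u{9}' | '\u{D}' | '\u{A}')
}

// ASCII-only approximations of NameStartChar / NameChar.
fn is_name_start_char(c: char) -> bool {
    matches!(c, ':' | 'A'..='Z' | '_' | 'a'..='z')
}

fn is_name_char(c: char) -> bool {
    is_name_start_char(c) || matches!(c, '-' | '.' | '0'..='9')
}

// A Name is one start character followed by zero or more name characters.
fn is_valid_name(name: &str) -> bool {
    let mut chars = name.chars();
    match chars.next() {
        Some(first) if is_name_start_char(first) => chars.all(is_name_char),
        _ => false,
    }
}

fn main() {
    assert!(is_valid_name("xs:element"));
    assert!(!is_valid_name("1bad"));
    assert!(is_whitespace_char(' '));
}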
lib.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Bindings to libuv, along with the default implementation of `std::rt::rtio`.
UV types consist of the event loop (Loop), Watchers, Requests and
Callbacks.
Watchers and Requests encapsulate pointers to uv *handles*, which have
subtyping relationships with each other. This subtyping is reflected
in the bindings with explicit or implicit coercions. For example, an
upcast from TcpWatcher to StreamWatcher is done with
`tcp_watcher.as_stream()`. In other cases a callback on a specific
type of watcher will be passed a watcher of a supertype.
Currently all use of Request types (connect/write requests) are
encapsulated in the bindings and don't need to be dealt with by the
caller.
# Safety note
Due to the complex lifecycle of uv handles, as well as compiler bugs,
this module is not memory safe and requires explicit memory management,
via `close` and `delete` methods.
*/
#![crate_name = "rustuv"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, unsafe_destructor)]
#![deny(unused_result, unused_must_use)]
#![allow(visible_private_types)]
#![reexport_test_harness_main = "test_main"]
#[cfg(test)] extern crate green;
#[cfg(test)] extern crate debug;
#[cfg(test)] extern crate realrustuv = "rustuv";
extern crate libc;
extern crate alloc;
use libc::{c_int, c_void};
use std::fmt;
use std::mem;
use std::ptr;
use std::string;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::rt::task::{BlockedTask, Task};
use std::task;
pub use self::async::AsyncWatcher;
pub use self::file::{FsRequest, FileWatcher};
pub use self::idle::IdleWatcher;
pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher};
pub use self::pipe::{PipeWatcher, PipeListener, PipeAcceptor};
pub use self::process::Process;
pub use self::signal::SignalWatcher;
pub use self::timer::TimerWatcher;
pub use self::tty::TtyWatcher;
// Run tests with libgreen instead of libnative.
#[cfg(test)] #[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, event_loop, test_main)
}
mod macros;
mod access;
mod timeout;
mod homing;
mod queue;
mod rc;
pub mod uvio;
pub mod uvll;
pub mod file;
pub mod net;
pub mod idle;
pub mod timer;
pub mod async;
pub mod addrinfo;
pub mod process;
pub mod pipe;
pub mod tty;
pub mod signal;
pub mod stream;
/// Creates a new event loop which is powered by libuv
///
/// This function is used in tandem with libgreen's `PoolConfig` type as a value
/// for the `event_loop_factory` field. Using this function as the event loop
/// factory will power programs with libuv and enable green threading.
///
/// # Example
///
/// ```
/// extern crate rustuv;
/// extern crate green;
///
/// #[start]
/// fn start(argc: int, argv: *const *const u8) -> int {
/// green::start(argc, argv, rustuv::event_loop, main)
/// }
///
/// fn main() {
/// // this code is running inside of a green task powered by libuv
/// }
/// ```
pub fn event_loop() -> Box<rtio::EventLoop + Send> {
box uvio::UvEventLoop::new() as Box<rtio::EventLoop + Send>
}
/// A type that wraps a uv handle
pub trait UvHandle<T> {
fn uv_handle(&self) -> *mut T;
fn uv_loop(&self) -> Loop {
Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(self.uv_handle()) })
}
// FIXME(#8888) dummy self
fn alloc(_: Option<Self>, ty: uvll::uv_handle_type) -> *mut T {
unsafe {
let handle = uvll::malloc_handle(ty);
assert!(!handle.is_null());
handle as *mut T
}
}
unsafe fn from_uv_handle<'a>(h: &'a *mut T) -> &'a mut Self {
mem::transmute(uvll::get_data_for_uv_handle(*h))
}
fn install(self: Box<Self>) -> Box<Self> {
unsafe {
let myptr = mem::transmute::<&Box<Self>, &*mut u8>(&self);
uvll::set_data_for_uv_handle(self.uv_handle(), *myptr);
}
self
}
fn close_async_(&mut self) {
// we used malloc to allocate all handles, so we must always have at
// least a callback to free all the handles we allocated.
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe { uvll::free_handle(handle) }
}
unsafe {
uvll::set_data_for_uv_handle(self.uv_handle(), ptr::mut_null::<()>());
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb)
}
}
fn close(&mut self)
|
}
}
}
}
pub struct ForbidSwitch {
msg: &'static str,
io: uint,
}
impl ForbidSwitch {
fn new(s: &'static str) -> ForbidSwitch {
ForbidSwitch {
msg: s,
io: homing::local_id(),
}
}
}
impl Drop for ForbidSwitch {
fn drop(&mut self) {
assert!(self.io == homing::local_id(),
"didn't want a scheduler switch: {}",
self.msg);
}
}
pub struct ForbidUnwind {
msg: &'static str,
failing_before: bool,
}
impl ForbidUnwind {
fn new(s: &'static str) -> ForbidUnwind {
ForbidUnwind {
msg: s, failing_before: task::failing(),
}
}
}
impl Drop for ForbidUnwind {
fn drop(&mut self) {
assert!(self.failing_before == task::failing(),
"didn't want an unwind during: {}", self.msg);
}
}
fn wait_until_woken_after(slot: *mut Option<BlockedTask>,
loop_: &Loop,
f: ||) {
let _f = ForbidUnwind::new("wait_until_woken_after");
unsafe {
assert!((*slot).is_none());
let task: Box<Task> = Local::take();
loop_.modify_blockers(1);
task.deschedule(1, |task| {
*slot = Some(task);
f();
Ok(())
});
loop_.modify_blockers(-1);
}
}
fn wakeup(slot: &mut Option<BlockedTask>) {
assert!(slot.is_some());
let _ = slot.take_unwrap().wake().map(|t| t.reawaken());
}
pub struct Request {
pub handle: *mut uvll::uv_req_t,
defused: bool,
}
impl Request {
pub fn new(ty: uvll::uv_req_type) -> Request {
unsafe {
let handle = uvll::malloc_req(ty);
uvll::set_data_for_req(handle, ptr::mut_null::<()>());
Request::wrap(handle)
}
}
pub fn wrap(handle: *mut uvll::uv_req_t) -> Request {
Request { handle: handle, defused: false }
}
pub fn set_data<T>(&self, t: *mut T) {
unsafe { uvll::set_data_for_req(self.handle, t) }
}
pub unsafe fn get_data<T>(&self) -> &'static mut T {
let data = uvll::get_data_for_req(self.handle);
        assert!(data != ptr::mut_null());
mem::transmute(data)
}
// This function should be used when the request handle has been given to an
// underlying uv function, and the uv function has succeeded. This means
// that uv will at some point invoke the callback, and in the meantime we
// can't deallocate the handle because libuv could be using it.
//
// This is still a problem in blocking situations due to linked failure. In
// the connection callback the handle should be re-wrapped with the `wrap`
// function to ensure its destruction.
pub fn defuse(&mut self) {
self.defused = true;
}
}
impl Drop for Request {
fn drop(&mut self) {
        if !self.defused {
unsafe { uvll::free_req(self.handle) }
}
}
}
/// FIXME: Loop(*handle) is buggy with destructors. Normal structs
/// with dtors may not be destructured, but tuple structs can,
/// but the results are not correct.
pub struct Loop {
handle: *mut uvll::uv_loop_t
}
impl Loop {
pub fn new() -> Loop {
let handle = unsafe { uvll::loop_new() };
assert!(handle.is_not_null());
unsafe { uvll::set_data_for_uv_loop(handle, 0 as *mut c_void) }
Loop::wrap(handle)
}
pub fn wrap(handle: *mut uvll::uv_loop_t) -> Loop { Loop { handle: handle } }
pub fn run(&mut self) {
assert_eq!(unsafe { uvll::uv_run(self.handle, uvll::RUN_DEFAULT) }, 0);
}
pub fn close(&mut self) {
unsafe { uvll::uv_loop_delete(self.handle) };
}
// The 'data' field of the uv_loop_t is used to count the number of tasks
// that are currently blocked waiting for I/O to complete.
fn modify_blockers(&self, amt: uint) {
unsafe {
let cur = uvll::get_data_for_uv_loop(self.handle) as uint;
uvll::set_data_for_uv_loop(self.handle, (cur + amt) as *mut c_void)
}
}
fn get_blockers(&self) -> uint {
unsafe { uvll::get_data_for_uv_loop(self.handle) as uint }
}
}
// FIXME: Need to define the error constants like EOF so they can be
// compared to the UvError type
pub struct UvError(c_int);
impl UvError {
pub fn name(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let name_str = uvll::uv_err_name(inner);
assert!(name_str.is_not_null());
string::raw::from_buf(name_str as *const u8)
}
}
pub fn desc(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let desc_str = uvll::uv_strerror(inner);
assert!(desc_str.is_not_null());
string::raw::from_buf(desc_str as *const u8)
}
}
pub fn is_eof(&self) -> bool {
let UvError(handle) = *self;
handle == uvll::EOF
}
}
impl fmt::Show for UvError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", self.name(), self.desc())
}
}
#[test]
fn error_smoke_test() {
let err: UvError = UvError(uvll::EOF);
assert_eq!(err.to_string(), "EOF: end of file".to_string());
}
#[cfg(unix)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: if errcode == uvll::EOF {libc::EOF as uint} else {-errcode as uint},
extra: 0,
detail: Some(uverr.desc()),
}
}
#[cfg(windows)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: match errcode {
uvll::EOF => libc::EOF,
uvll::EACCES => libc::ERROR_ACCESS_DENIED,
uvll::ECONNREFUSED => libc::WSAECONNREFUSED,
uvll::ECONNRESET => libc::WSAECONNRESET,
uvll::ENOTCONN => libc::WSAENOTCONN,
uvll::ENOENT => libc::ERROR_FILE_NOT_FOUND,
uvll::EPIPE => libc::ERROR_NO_DATA,
uvll::ECONNABORTED => libc::WSAECONNABORTED,
uvll::EADDRNOTAVAIL => libc::WSAEADDRNOTAVAIL,
uvll::ECANCELED => libc::ERROR_OPERATION_ABORTED,
uvll::EADDRINUSE => libc::WSAEADDRINUSE,
uvll::EPERM => libc::ERROR_ACCESS_DENIED,
err => {
uvdebug!("uverr.code {}", err as int);
// FIXME: Need to map remaining uv error types
-1
}
} as uint,
extra: 0,
detail: Some(uverr.desc()),
}
}
/// Given a uv error code, convert a callback status to a UvError
pub fn status_to_maybe_uv_error(status: c_int) -> Option<UvError> {
if status >= 0 {
None
} else {
Some(UvError(status))
}
}
pub fn status_to_io_result(status: c_int) -> IoResult<()> {
if status >= 0 {Ok(())} else {Err(uv_error_to_io_error(UvError(status)))}
}
/// The uv buffer type
pub type Buf = uvll::uv_buf_t;
pub fn empty_buf() -> Buf {
uvll::uv_buf_t {
base: ptr::mut_null(),
len: 0,
}
}
/// Borrow a slice to a Buf
pub fn slice_to_uv_buf(v: &[u8]) -> Buf {
let data = v.as_ptr();
uvll::uv_buf_t { base: data as *mut u8, len: v.len() as uvll::uv_buf_len_t }
}
// This function is full of lies!
#[cfg(test)]
fn local_loop() -> &'static mut uvio::UvIoFactory {
use std::raw::TraitObject;
unsafe {
mem::transmute({
let mut task = Local::borrow(None::<Task>);
let mut io = task.local_io().unwrap();
let obj: TraitObject =
mem::transmute(io.get());
obj.data
})
}
}
#[cfg(test)]
fn next_test_ip4() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip4();
let ip = match ip {
io::net::ip::Ipv4Addr(a, b, c, d) => rtio::Ipv4Addr(a, b, c, d),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
fn next_test_ip6() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip6();
let ip = match ip {
io::net::ip::Ipv6Addr(a, b, c, d, e, f, g, h) =>
rtio::Ipv6Addr(a, b, c, d, e, f, g, h),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
mod test {
use std::mem::transmute;
use std::rt::thread::Thread;
use super::{slice_to_uv_buf, Loop};
#[test]
fn test_slice_to_uv_buf() {
let slice = [0, ..20];
let buf = slice_to_uv_buf(slice);
assert_eq!(buf.len, 20);
unsafe {
let base = transmute::<*mut u8, *mut u8>(buf.base);
(*base) = 1;
(*base.offset(1)) = 2;
}
assert!(slice[0] == 1);
assert!(slice[1] == 2);
}
#[test]
fn loop_smoke_test() {
Thread::start(proc() {
let mut loop_ = Loop::new();
loop_.run();
loop_.close();
}).join();
}
}
|
{
let mut slot = None;
unsafe {
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb);
uvll::set_data_for_uv_handle(self.uv_handle(),
ptr::mut_null::<()>());
wait_until_woken_after(&mut slot, &self.uv_loop(), || {
uvll::set_data_for_uv_handle(self.uv_handle(), &mut slot);
})
}
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe {
let data = uvll::get_data_for_uv_handle(handle);
uvll::free_handle(handle);
if data == ptr::mut_null() { return }
let slot: &mut Option<BlockedTask> = mem::transmute(data);
wakeup(slot);
|
identifier_body
|
lib.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Bindings to libuv, along with the default implementation of `std::rt::rtio`.
UV types consist of the event loop (Loop), Watchers, Requests and
Callbacks.
Watchers and Requests encapsulate pointers to uv *handles*, which have
subtyping relationships with each other. This subtyping is reflected
in the bindings with explicit or implicit coercions. For example, an
upcast from TcpWatcher to StreamWatcher is done with
`tcp_watcher.as_stream()`. In other cases a callback on a specific
type of watcher will be passed a watcher of a supertype.
Currently all uses of Request types (connect/write requests) are
encapsulated in the bindings and don't need to be dealt with by the
caller.
# Safety note
Due to the complex lifecycle of uv handles, as well as compiler bugs,
this module is not memory safe and requires explicit memory management,
via `close` and `delete` methods.
*/
#![crate_name = "rustuv"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, unsafe_destructor)]
#![deny(unused_result, unused_must_use)]
#![allow(visible_private_types)]
#![reexport_test_harness_main = "test_main"]
#[cfg(test)] extern crate green;
#[cfg(test)] extern crate debug;
#[cfg(test)] extern crate realrustuv = "rustuv";
extern crate libc;
extern crate alloc;
use libc::{c_int, c_void};
use std::fmt;
use std::mem;
use std::ptr;
use std::string;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::rt::task::{BlockedTask, Task};
use std::task;
pub use self::async::AsyncWatcher;
pub use self::file::{FsRequest, FileWatcher};
pub use self::idle::IdleWatcher;
pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher};
pub use self::pipe::{PipeWatcher, PipeListener, PipeAcceptor};
pub use self::process::Process;
pub use self::signal::SignalWatcher;
pub use self::timer::TimerWatcher;
pub use self::tty::TtyWatcher;
// Run tests with libgreen instead of libnative.
#[cfg(test)] #[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, event_loop, test_main)
}
mod macros;
mod access;
mod timeout;
mod homing;
mod queue;
mod rc;
pub mod uvio;
pub mod uvll;
pub mod file;
pub mod net;
pub mod idle;
pub mod timer;
pub mod async;
pub mod addrinfo;
pub mod process;
pub mod pipe;
pub mod tty;
pub mod signal;
pub mod stream;
/// Creates a new event loop which is powered by libuv
///
/// This function is used in tandem with libgreen's `PoolConfig` type as a value
/// for the `event_loop_factory` field. Using this function as the event loop
/// factory will power programs with libuv and enable green threading.
///
/// # Example
///
/// ```
/// extern crate rustuv;
/// extern crate green;
///
/// #[start]
/// fn start(argc: int, argv: *const *const u8) -> int {
/// green::start(argc, argv, rustuv::event_loop, main)
/// }
///
/// fn main() {
/// // this code is running inside of a green task powered by libuv
/// }
/// ```
pub fn event_loop() -> Box<rtio::EventLoop + Send> {
box uvio::UvEventLoop::new() as Box<rtio::EventLoop + Send>
}
/// A type that wraps a uv handle
pub trait UvHandle<T> {
fn uv_handle(&self) -> *mut T;
fn uv_loop(&self) -> Loop {
Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(self.uv_handle()) })
}
// FIXME(#8888) dummy self
fn alloc(_: Option<Self>, ty: uvll::uv_handle_type) -> *mut T {
unsafe {
let handle = uvll::malloc_handle(ty);
assert!(!handle.is_null());
handle as *mut T
}
}
unsafe fn from_uv_handle<'a>(h: &'a *mut T) -> &'a mut Self {
mem::transmute(uvll::get_data_for_uv_handle(*h))
}
fn install(self: Box<Self>) -> Box<Self> {
unsafe {
let myptr = mem::transmute::<&Box<Self>, &*mut u8>(&self);
uvll::set_data_for_uv_handle(self.uv_handle(), *myptr);
}
self
}
fn close_async_(&mut self) {
// we used malloc to allocate all handles, so we must always have at
// least a callback to free all the handles we allocated.
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe { uvll::free_handle(handle) }
}
unsafe {
uvll::set_data_for_uv_handle(self.uv_handle(), ptr::mut_null::<()>());
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb)
}
}
fn
|
(&mut self) {
let mut slot = None;
unsafe {
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb);
uvll::set_data_for_uv_handle(self.uv_handle(),
ptr::mut_null::<()>());
wait_until_woken_after(&mut slot, &self.uv_loop(), || {
uvll::set_data_for_uv_handle(self.uv_handle(), &mut slot);
})
}
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe {
let data = uvll::get_data_for_uv_handle(handle);
uvll::free_handle(handle);
if data == ptr::mut_null() { return }
let slot: &mut Option<BlockedTask> = mem::transmute(data);
wakeup(slot);
}
}
}
}
pub struct ForbidSwitch {
msg: &'static str,
io: uint,
}
impl ForbidSwitch {
fn new(s: &'static str) -> ForbidSwitch {
ForbidSwitch {
msg: s,
io: homing::local_id(),
}
}
}
impl Drop for ForbidSwitch {
fn drop(&mut self) {
assert!(self.io == homing::local_id(),
"didn't want a scheduler switch: {}",
self.msg);
}
}
pub struct ForbidUnwind {
msg: &'static str,
failing_before: bool,
}
impl ForbidUnwind {
fn new(s: &'static str) -> ForbidUnwind {
ForbidUnwind {
msg: s, failing_before: task::failing(),
}
}
}
impl Drop for ForbidUnwind {
fn drop(&mut self) {
assert!(self.failing_before == task::failing(),
"didn't want an unwind during: {}", self.msg);
}
}
fn wait_until_woken_after(slot: *mut Option<BlockedTask>,
loop_: &Loop,
f: ||) {
let _f = ForbidUnwind::new("wait_until_woken_after");
unsafe {
assert!((*slot).is_none());
let task: Box<Task> = Local::take();
loop_.modify_blockers(1);
task.deschedule(1, |task| {
*slot = Some(task);
f();
Ok(())
});
loop_.modify_blockers(-1);
}
}
fn wakeup(slot: &mut Option<BlockedTask>) {
assert!(slot.is_some());
let _ = slot.take_unwrap().wake().map(|t| t.reawaken());
}
pub struct Request {
pub handle: *mut uvll::uv_req_t,
defused: bool,
}
impl Request {
pub fn new(ty: uvll::uv_req_type) -> Request {
unsafe {
let handle = uvll::malloc_req(ty);
uvll::set_data_for_req(handle, ptr::mut_null::<()>());
Request::wrap(handle)
}
}
pub fn wrap(handle: *mut uvll::uv_req_t) -> Request {
Request { handle: handle, defused: false }
}
pub fn set_data<T>(&self, t: *mut T) {
unsafe { uvll::set_data_for_req(self.handle, t) }
}
pub unsafe fn get_data<T>(&self) -> &'static mut T {
let data = uvll::get_data_for_req(self.handle);
assert!(data != ptr::mut_null());
mem::transmute(data)
}
// This function should be used when the request handle has been given to an
// underlying uv function, and the uv function has succeeded. This means
// that uv will at some point invoke the callback, and in the meantime we
// can't deallocate the handle because libuv could be using it.
//
// This is still a problem in blocking situations due to linked failure. In
// the connection callback the handle should be re-wrapped with the `wrap`
// function to ensure its destruction.
pub fn defuse(&mut self) {
self.defused = true;
}
}
impl Drop for Request {
fn drop(&mut self) {
if !self.defused {
unsafe { uvll::free_req(self.handle) }
}
}
}
/// FIXME: Loop(*handle) is buggy with destructors. Normal structs
/// with dtors may not be destructured, but tuple structs can,
/// but the results are not correct.
pub struct Loop {
handle: *mut uvll::uv_loop_t
}
impl Loop {
pub fn new() -> Loop {
let handle = unsafe { uvll::loop_new() };
assert!(handle.is_not_null());
unsafe { uvll::set_data_for_uv_loop(handle, 0 as *mut c_void) }
Loop::wrap(handle)
}
pub fn wrap(handle: *mut uvll::uv_loop_t) -> Loop { Loop { handle: handle } }
pub fn run(&mut self) {
assert_eq!(unsafe { uvll::uv_run(self.handle, uvll::RUN_DEFAULT) }, 0);
}
pub fn close(&mut self) {
unsafe { uvll::uv_loop_delete(self.handle) };
}
// The 'data' field of the uv_loop_t is used to count the number of tasks
// that are currently blocked waiting for I/O to complete.
fn modify_blockers(&self, amt: uint) {
unsafe {
let cur = uvll::get_data_for_uv_loop(self.handle) as uint;
uvll::set_data_for_uv_loop(self.handle, (cur + amt) as *mut c_void)
}
}
fn get_blockers(&self) -> uint {
unsafe { uvll::get_data_for_uv_loop(self.handle) as uint }
}
}
// FIXME: Need to define the error constants like EOF so they can be
// compared to the UvError type
pub struct UvError(c_int);
impl UvError {
pub fn name(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let name_str = uvll::uv_err_name(inner);
assert!(name_str.is_not_null());
string::raw::from_buf(name_str as *const u8)
}
}
pub fn desc(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let desc_str = uvll::uv_strerror(inner);
assert!(desc_str.is_not_null());
string::raw::from_buf(desc_str as *const u8)
}
}
pub fn is_eof(&self) -> bool {
let UvError(handle) = *self;
handle == uvll::EOF
}
}
impl fmt::Show for UvError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", self.name(), self.desc())
}
}
#[test]
fn error_smoke_test() {
let err: UvError = UvError(uvll::EOF);
assert_eq!(err.to_string(), "EOF: end of file".to_string());
}
#[cfg(unix)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: if errcode == uvll::EOF {libc::EOF as uint} else {-errcode as uint},
extra: 0,
detail: Some(uverr.desc()),
}
}
#[cfg(windows)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: match errcode {
uvll::EOF => libc::EOF,
uvll::EACCES => libc::ERROR_ACCESS_DENIED,
uvll::ECONNREFUSED => libc::WSAECONNREFUSED,
uvll::ECONNRESET => libc::WSAECONNRESET,
uvll::ENOTCONN => libc::WSAENOTCONN,
uvll::ENOENT => libc::ERROR_FILE_NOT_FOUND,
uvll::EPIPE => libc::ERROR_NO_DATA,
uvll::ECONNABORTED => libc::WSAECONNABORTED,
uvll::EADDRNOTAVAIL => libc::WSAEADDRNOTAVAIL,
uvll::ECANCELED => libc::ERROR_OPERATION_ABORTED,
uvll::EADDRINUSE => libc::WSAEADDRINUSE,
uvll::EPERM => libc::ERROR_ACCESS_DENIED,
err => {
uvdebug!("uverr.code {}", err as int);
// FIXME: Need to map remaining uv error types
-1
}
} as uint,
extra: 0,
detail: Some(uverr.desc()),
}
}
/// Given a uv error code, convert a callback status to a UvError
pub fn status_to_maybe_uv_error(status: c_int) -> Option<UvError> {
if status >= 0 {
None
} else {
Some(UvError(status))
}
}
pub fn status_to_io_result(status: c_int) -> IoResult<()> {
if status >= 0 {Ok(())} else {Err(uv_error_to_io_error(UvError(status)))}
}
/// The uv buffer type
pub type Buf = uvll::uv_buf_t;
pub fn empty_buf() -> Buf {
uvll::uv_buf_t {
base: ptr::mut_null(),
len: 0,
}
}
/// Borrow a slice to a Buf
pub fn slice_to_uv_buf(v: &[u8]) -> Buf {
let data = v.as_ptr();
uvll::uv_buf_t { base: data as *mut u8, len: v.len() as uvll::uv_buf_len_t }
}
// This function is full of lies!
#[cfg(test)]
fn local_loop() -> &'static mut uvio::UvIoFactory {
use std::raw::TraitObject;
unsafe {
mem::transmute({
let mut task = Local::borrow(None::<Task>);
let mut io = task.local_io().unwrap();
let obj: TraitObject =
mem::transmute(io.get());
obj.data
})
}
}
#[cfg(test)]
fn next_test_ip4() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip4();
let ip = match ip {
io::net::ip::Ipv4Addr(a, b, c, d) => rtio::Ipv4Addr(a, b, c, d),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
fn next_test_ip6() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip6();
let ip = match ip {
io::net::ip::Ipv6Addr(a, b, c, d, e, f, g, h) =>
rtio::Ipv6Addr(a, b, c, d, e, f, g, h),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
mod test {
use std::mem::transmute;
use std::rt::thread::Thread;
use super::{slice_to_uv_buf, Loop};
#[test]
fn test_slice_to_uv_buf() {
let slice = [0, ..20];
let buf = slice_to_uv_buf(slice);
assert_eq!(buf.len, 20);
unsafe {
let base = transmute::<*mut u8, *mut u8>(buf.base);
(*base) = 1;
(*base.offset(1)) = 2;
}
assert!(slice[0] == 1);
assert!(slice[1] == 2);
}
#[test]
fn loop_smoke_test() {
Thread::start(proc() {
let mut loop_ = Loop::new();
loop_.run();
loop_.close();
}).join();
}
}
|
close
|
identifier_name
|
lib.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Bindings to libuv, along with the default implementation of `std::rt::rtio`.
UV types consist of the event loop (Loop), Watchers, Requests and
Callbacks.
Watchers and Requests encapsulate pointers to uv *handles*, which have
subtyping relationships with each other. This subtyping is reflected
in the bindings with explicit or implicit coercions. For example, an
upcast from TcpWatcher to StreamWatcher is done with
`tcp_watcher.as_stream()`. In other cases a callback on a specific
type of watcher will be passed a watcher of a supertype.
Currently all uses of Request types (connect/write requests) are
encapsulated in the bindings and don't need to be dealt with by the
caller.
# Safety note
Due to the complex lifecycle of uv handles, as well as compiler bugs,
this module is not memory safe and requires explicit memory management,
via `close` and `delete` methods.
*/
#![crate_name = "rustuv"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, unsafe_destructor)]
#![deny(unused_result, unused_must_use)]
#![allow(visible_private_types)]
#![reexport_test_harness_main = "test_main"]
#[cfg(test)] extern crate green;
#[cfg(test)] extern crate debug;
#[cfg(test)] extern crate realrustuv = "rustuv";
extern crate libc;
extern crate alloc;
use libc::{c_int, c_void};
use std::fmt;
use std::mem;
use std::ptr;
use std::string;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::rt::task::{BlockedTask, Task};
use std::task;
pub use self::async::AsyncWatcher;
pub use self::file::{FsRequest, FileWatcher};
pub use self::idle::IdleWatcher;
pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher};
pub use self::pipe::{PipeWatcher, PipeListener, PipeAcceptor};
pub use self::process::Process;
pub use self::signal::SignalWatcher;
pub use self::timer::TimerWatcher;
pub use self::tty::TtyWatcher;
// Run tests with libgreen instead of libnative.
#[cfg(test)] #[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, event_loop, test_main)
}
mod macros;
mod access;
mod timeout;
mod homing;
mod queue;
mod rc;
pub mod uvio;
pub mod uvll;
pub mod file;
pub mod net;
pub mod idle;
pub mod timer;
pub mod async;
pub mod addrinfo;
pub mod process;
pub mod pipe;
pub mod tty;
pub mod signal;
pub mod stream;
/// Creates a new event loop which is powered by libuv
///
/// This function is used in tandem with libgreen's `PoolConfig` type as a value
/// for the `event_loop_factory` field. Using this function as the event loop
/// factory will power programs with libuv and enable green threading.
///
/// # Example
///
/// ```
/// extern crate rustuv;
/// extern crate green;
///
/// #[start]
/// fn start(argc: int, argv: *const *const u8) -> int {
/// green::start(argc, argv, rustuv::event_loop, main)
/// }
///
/// fn main() {
/// // this code is running inside of a green task powered by libuv
/// }
/// ```
pub fn event_loop() -> Box<rtio::EventLoop + Send> {
box uvio::UvEventLoop::new() as Box<rtio::EventLoop + Send>
}
/// A type that wraps a uv handle
pub trait UvHandle<T> {
fn uv_handle(&self) -> *mut T;
fn uv_loop(&self) -> Loop {
Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(self.uv_handle()) })
}
// FIXME(#8888) dummy self
fn alloc(_: Option<Self>, ty: uvll::uv_handle_type) -> *mut T {
unsafe {
let handle = uvll::malloc_handle(ty);
assert!(!handle.is_null());
handle as *mut T
}
}
unsafe fn from_uv_handle<'a>(h: &'a *mut T) -> &'a mut Self {
mem::transmute(uvll::get_data_for_uv_handle(*h))
}
fn install(self: Box<Self>) -> Box<Self> {
unsafe {
let myptr = mem::transmute::<&Box<Self>, &*mut u8>(&self);
uvll::set_data_for_uv_handle(self.uv_handle(), *myptr);
}
self
}
fn close_async_(&mut self) {
// we used malloc to allocate all handles, so we must always have at
// least a callback to free all the handles we allocated.
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe { uvll::free_handle(handle) }
}
unsafe {
uvll::set_data_for_uv_handle(self.uv_handle(), ptr::mut_null::<()>());
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb)
}
}
fn close(&mut self) {
let mut slot = None;
unsafe {
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb);
uvll::set_data_for_uv_handle(self.uv_handle(),
ptr::mut_null::<()>());
wait_until_woken_after(&mut slot, &self.uv_loop(), || {
uvll::set_data_for_uv_handle(self.uv_handle(), &mut slot);
})
}
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe {
let data = uvll::get_data_for_uv_handle(handle);
uvll::free_handle(handle);
if data == ptr::mut_null() { return }
let slot: &mut Option<BlockedTask> = mem::transmute(data);
wakeup(slot);
}
}
}
}
pub struct ForbidSwitch {
msg: &'static str,
io: uint,
}
impl ForbidSwitch {
fn new(s: &'static str) -> ForbidSwitch {
ForbidSwitch {
msg: s,
io: homing::local_id(),
}
}
}
impl Drop for ForbidSwitch {
fn drop(&mut self) {
assert!(self.io == homing::local_id(),
"didn't want a scheduler switch: {}",
self.msg);
}
}
pub struct ForbidUnwind {
msg: &'static str,
failing_before: bool,
}
impl ForbidUnwind {
fn new(s: &'static str) -> ForbidUnwind {
ForbidUnwind {
msg: s, failing_before: task::failing(),
}
}
}
impl Drop for ForbidUnwind {
fn drop(&mut self) {
assert!(self.failing_before == task::failing(),
"didn't want an unwind during: {}", self.msg);
}
}
fn wait_until_woken_after(slot: *mut Option<BlockedTask>,
loop_: &Loop,
f: ||) {
let _f = ForbidUnwind::new("wait_until_woken_after");
unsafe {
|
loop_.modify_blockers(1);
task.deschedule(1, |task| {
*slot = Some(task);
f();
Ok(())
});
loop_.modify_blockers(-1);
}
}
fn wakeup(slot: &mut Option<BlockedTask>) {
assert!(slot.is_some());
let _ = slot.take_unwrap().wake().map(|t| t.reawaken());
}
pub struct Request {
pub handle: *mut uvll::uv_req_t,
defused: bool,
}
impl Request {
pub fn new(ty: uvll::uv_req_type) -> Request {
unsafe {
let handle = uvll::malloc_req(ty);
uvll::set_data_for_req(handle, ptr::mut_null::<()>());
Request::wrap(handle)
}
}
pub fn wrap(handle: *mut uvll::uv_req_t) -> Request {
Request { handle: handle, defused: false }
}
pub fn set_data<T>(&self, t: *mut T) {
unsafe { uvll::set_data_for_req(self.handle, t) }
}
pub unsafe fn get_data<T>(&self) -> &'static mut T {
let data = uvll::get_data_for_req(self.handle);
assert!(data != ptr::mut_null());
mem::transmute(data)
}
// This function should be used when the request handle has been given to an
// underlying uv function, and the uv function has succeeded. This means
// that uv will at some point invoke the callback, and in the meantime we
// can't deallocate the handle because libuv could be using it.
//
// This is still a problem in blocking situations due to linked failure. In
// the connection callback the handle should be re-wrapped with the `wrap`
// function to ensure its destruction.
pub fn defuse(&mut self) {
self.defused = true;
}
}
impl Drop for Request {
fn drop(&mut self) {
if !self.defused {
unsafe { uvll::free_req(self.handle) }
}
}
}
/// FIXME: Loop(*handle) is buggy with destructors. Normal structs
/// with dtors may not be destructured, but tuple structs can,
/// but the results are not correct.
pub struct Loop {
handle: *mut uvll::uv_loop_t
}
impl Loop {
pub fn new() -> Loop {
let handle = unsafe { uvll::loop_new() };
assert!(handle.is_not_null());
unsafe { uvll::set_data_for_uv_loop(handle, 0 as *mut c_void) }
Loop::wrap(handle)
}
pub fn wrap(handle: *mut uvll::uv_loop_t) -> Loop { Loop { handle: handle } }
pub fn run(&mut self) {
assert_eq!(unsafe { uvll::uv_run(self.handle, uvll::RUN_DEFAULT) }, 0);
}
pub fn close(&mut self) {
unsafe { uvll::uv_loop_delete(self.handle) };
}
// The 'data' field of the uv_loop_t is used to count the number of tasks
// that are currently blocked waiting for I/O to complete.
fn modify_blockers(&self, amt: uint) {
unsafe {
let cur = uvll::get_data_for_uv_loop(self.handle) as uint;
uvll::set_data_for_uv_loop(self.handle, (cur + amt) as *mut c_void)
}
}
fn get_blockers(&self) -> uint {
unsafe { uvll::get_data_for_uv_loop(self.handle) as uint }
}
}
// FIXME: Need to define the error constants like EOF so they can be
// compared to the UvError type
pub struct UvError(c_int);
impl UvError {
pub fn name(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let name_str = uvll::uv_err_name(inner);
assert!(name_str.is_not_null());
string::raw::from_buf(name_str as *const u8)
}
}
pub fn desc(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let desc_str = uvll::uv_strerror(inner);
assert!(desc_str.is_not_null());
string::raw::from_buf(desc_str as *const u8)
}
}
pub fn is_eof(&self) -> bool {
let UvError(handle) = *self;
handle == uvll::EOF
}
}
impl fmt::Show for UvError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", self.name(), self.desc())
}
}
#[test]
fn error_smoke_test() {
let err: UvError = UvError(uvll::EOF);
assert_eq!(err.to_string(), "EOF: end of file".to_string());
}
#[cfg(unix)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: if errcode == uvll::EOF {libc::EOF as uint} else {-errcode as uint},
extra: 0,
detail: Some(uverr.desc()),
}
}
#[cfg(windows)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: match errcode {
uvll::EOF => libc::EOF,
uvll::EACCES => libc::ERROR_ACCESS_DENIED,
uvll::ECONNREFUSED => libc::WSAECONNREFUSED,
uvll::ECONNRESET => libc::WSAECONNRESET,
uvll::ENOTCONN => libc::WSAENOTCONN,
uvll::ENOENT => libc::ERROR_FILE_NOT_FOUND,
uvll::EPIPE => libc::ERROR_NO_DATA,
uvll::ECONNABORTED => libc::WSAECONNABORTED,
uvll::EADDRNOTAVAIL => libc::WSAEADDRNOTAVAIL,
uvll::ECANCELED => libc::ERROR_OPERATION_ABORTED,
uvll::EADDRINUSE => libc::WSAEADDRINUSE,
uvll::EPERM => libc::ERROR_ACCESS_DENIED,
err => {
uvdebug!("uverr.code {}", err as int);
// FIXME: Need to map remaining uv error types
-1
}
} as uint,
extra: 0,
detail: Some(uverr.desc()),
}
}
/// Given a uv error code, convert a callback status to a UvError
pub fn status_to_maybe_uv_error(status: c_int) -> Option<UvError> {
if status >= 0 {
None
} else {
Some(UvError(status))
}
}
pub fn status_to_io_result(status: c_int) -> IoResult<()> {
if status >= 0 {Ok(())} else {Err(uv_error_to_io_error(UvError(status)))}
}
/// The uv buffer type
pub type Buf = uvll::uv_buf_t;
pub fn empty_buf() -> Buf {
uvll::uv_buf_t {
base: ptr::mut_null(),
len: 0,
}
}
/// Borrow a slice to a Buf
pub fn slice_to_uv_buf(v: &[u8]) -> Buf {
let data = v.as_ptr();
uvll::uv_buf_t { base: data as *mut u8, len: v.len() as uvll::uv_buf_len_t }
}
// This function is full of lies!
#[cfg(test)]
fn local_loop() -> &'static mut uvio::UvIoFactory {
use std::raw::TraitObject;
unsafe {
mem::transmute({
let mut task = Local::borrow(None::<Task>);
let mut io = task.local_io().unwrap();
let obj: TraitObject =
mem::transmute(io.get());
obj.data
})
}
}
#[cfg(test)]
fn next_test_ip4() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip4();
let ip = match ip {
io::net::ip::Ipv4Addr(a, b, c, d) => rtio::Ipv4Addr(a, b, c, d),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
fn next_test_ip6() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip6();
let ip = match ip {
io::net::ip::Ipv6Addr(a, b, c, d, e, f, g, h) =>
rtio::Ipv6Addr(a, b, c, d, e, f, g, h),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
mod test {
use std::mem::transmute;
use std::rt::thread::Thread;
use super::{slice_to_uv_buf, Loop};
#[test]
fn test_slice_to_uv_buf() {
let slice = [0, ..20];
let buf = slice_to_uv_buf(slice);
assert_eq!(buf.len, 20);
unsafe {
let base = transmute::<*mut u8, *mut u8>(buf.base);
(*base) = 1;
(*base.offset(1)) = 2;
}
assert!(slice[0] == 1);
assert!(slice[1] == 2);
}
#[test]
fn loop_smoke_test() {
Thread::start(proc() {
let mut loop_ = Loop::new();
loop_.run();
loop_.close();
}).join();
}
}
|
assert!((*slot).is_none());
let task: Box<Task> = Local::take();
|
random_line_split
|
uievent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JSRef, RootedReference, Temporary};
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use dom::window::Window;
use util::str::DOMString;
use std::cell::Cell;
use std::default::Default;
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#interface-UIEvent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableJS<Window>,
detail: Cell<i32>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
*self.type_id() == EventTypeId::UIEvent
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent
|
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(EventTypeId::UIEvent),
GlobalRef::Window(window),
UIEventBinding::Wrap)
}
pub fn new(window: JSRef<Window>,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.r().InitUIEvent(type_, can_bubble == EventBubbles::Bubbles, cancelable == EventCancelable::Cancelable, view, detail);
Temporary::from_rooted(ev.r())
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable { EventCancelable::Cancelable } else { EventCancelable::NotCancelable };
let event = UIEvent::new(global.as_window(), type_,
bubbles, cancelable,
init.view.r(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-view
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get()
}
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-detail
fn Detail(self) -> i32 {
self.detail.get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
let event: JSRef<Event> = EventCast::from_ref(self);
if event.dispatching() {
return;
}
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.set(detail);
}
}
|
{
UIEvent {
event: Event::new_inherited(type_id),
view: Default::default(),
detail: Cell::new(0),
}
}
|
identifier_body
|
uievent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JSRef, RootedReference, Temporary};
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use dom::window::Window;
use util::str::DOMString;
use std::cell::Cell;
use std::default::Default;
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#interface-UIEvent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableJS<Window>,
detail: Cell<i32>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
*self.type_id() == EventTypeId::UIEvent
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent {
UIEvent {
event: Event::new_inherited(type_id),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(EventTypeId::UIEvent),
GlobalRef::Window(window),
UIEventBinding::Wrap)
}
pub fn new(window: JSRef<Window>,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.r().InitUIEvent(type_, can_bubble == EventBubbles::Bubbles, cancelable == EventCancelable::Cancelable, view, detail);
Temporary::from_rooted(ev.r())
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable { EventCancelable::Cancelable } else { EventCancelable::NotCancelable };
let event = UIEvent::new(global.as_window(), type_,
bubbles, cancelable,
init.view.r(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-view
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get()
}
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-detail
fn
|
(self) -> i32 {
self.detail.get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
let event: JSRef<Event> = EventCast::from_ref(self);
if event.dispatching() {
return;
}
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.set(detail);
}
}
|
Detail
|
identifier_name
|
uievent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JSRef, RootedReference, Temporary};
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use dom::window::Window;
use util::str::DOMString;
use std::cell::Cell;
use std::default::Default;
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#interface-UIEvent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableJS<Window>,
detail: Cell<i32>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
*self.type_id() == EventTypeId::UIEvent
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent {
UIEvent {
event: Event::new_inherited(type_id),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(EventTypeId::UIEvent),
GlobalRef::Window(window),
UIEventBinding::Wrap)
}
pub fn new(window: JSRef<Window>,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.r().InitUIEvent(type_, can_bubble == EventBubbles::Bubbles, cancelable == EventCancelable::Cancelable, view, detail);
Temporary::from_rooted(ev.r())
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable { EventCancelable::Cancelable } else
|
;
let event = UIEvent::new(global.as_window(), type_,
bubbles, cancelable,
init.view.r(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-view
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get()
}
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-detail
fn Detail(self) -> i32 {
self.detail.get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
let event: JSRef<Event> = EventCast::from_ref(self);
if event.dispatching() {
return;
}
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.set(detail);
}
}
|
{ EventCancelable::NotCancelable }
|
conditional_block
|
uievent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JSRef, RootedReference, Temporary};
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use dom::window::Window;
use util::str::DOMString;
use std::cell::Cell;
use std::default::Default;
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#interface-UIEvent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableJS<Window>,
detail: Cell<i32>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
*self.type_id() == EventTypeId::UIEvent
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent {
UIEvent {
event: Event::new_inherited(type_id),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(EventTypeId::UIEvent),
GlobalRef::Window(window),
UIEventBinding::Wrap)
}
pub fn new(window: JSRef<Window>,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.r().InitUIEvent(type_, can_bubble == EventBubbles::Bubbles, cancelable == EventCancelable::Cancelable, view, detail);
Temporary::from_rooted(ev.r())
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable { EventCancelable::Cancelable } else { EventCancelable::NotCancelable };
let event = UIEvent::new(global.as_window(), type_,
bubbles, cancelable,
init.view.r(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-view
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get()
}
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-detail
fn Detail(self) -> i32 {
self.detail.get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
|
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.set(detail);
}
}
|
let event: JSRef<Event> = EventCast::from_ref(self);
if event.dispatching() {
return;
}
|
random_line_split
|
crud.rs
|
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate arthas_derive;
extern crate rand;
extern crate arthas;
extern crate env_logger;
#[path = "../tests/common/mod.rs"]
pub mod common;
#[path = "../tests/model/mod.rs"]
pub mod model;
#[cfg(all(feature = "unstable", test))]
mod benches {
extern crate test;
use model::*;
use super::common::setup;
#[bench]
fn bench_a_insert(b: &mut test::Bencher) {
setup();
b.iter(|| {
Article::session()
.insert(Article::new("Hello world!"))
.unwrap()
})
}
#[bench]
fn
|
(b: &mut test::Bencher) {
setup();
b.iter(|| {
Article::session()
.field("title")
.eq("Hello world!")
.limit(100)
.find()
.unwrap()
})
}
}
|
bench_find
|
identifier_name
|
crud.rs
|
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate arthas_derive;
extern crate rand;
extern crate arthas;
extern crate env_logger;
#[path = "../tests/common/mod.rs"]
pub mod common;
#[path = "../tests/model/mod.rs"]
pub mod model;
#[cfg(all(feature = "unstable", test))]
mod benches {
extern crate test;
use model::*;
use super::common::setup;
#[bench]
fn bench_a_insert(b: &mut test::Bencher) {
setup();
b.iter(|| {
Article::session()
.insert(Article::new("Hello world!"))
.unwrap()
})
}
#[bench]
fn bench_find(b: &mut test::Bencher)
|
}
|
{
setup();
b.iter(|| {
Article::session()
.field("title")
.eq("Hello world!")
.limit(100)
.find()
.unwrap()
})
}
|
identifier_body
|
crud.rs
|
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate arthas_derive;
extern crate rand;
extern crate arthas;
extern crate env_logger;
#[path = "../tests/common/mod.rs"]
pub mod common;
#[path = "../tests/model/mod.rs"]
pub mod model;
#[cfg(all(feature = "unstable", test))]
mod benches {
extern crate test;
|
#[bench]
fn bench_a_insert(b: &mut test::Bencher) {
setup();
b.iter(|| {
Article::session()
.insert(Article::new("Hello world!"))
.unwrap()
})
}
#[bench]
fn bench_find(b: &mut test::Bencher) {
setup();
b.iter(|| {
Article::session()
.field("title")
.eq("Hello world!")
.limit(100)
.find()
.unwrap()
})
}
}
|
use model::*;
use super::common::setup;
|
random_line_split
|
expr-block-generic-box2.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::gc::{GC, Gc};
type compare<'a, T> = |T, T|: 'a -> bool;
fn test_generic<T:Clone>(expected: T, eq: compare<T>) {
let actual: T = { expected.clone() };
assert!((eq(expected, actual)));
}
fn test_vec() {
fn compare_vec(v1: Gc<int>, v2: Gc<int>) -> bool { return v1 == v2; }
test_generic::<Gc<int>>(box(GC) 1, compare_vec);
}
pub fn main() { test_vec(); }
|
random_line_split
|
|
expr-block-generic-box2.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::gc::{GC, Gc};
type compare<'a, T> = |T, T|: 'a -> bool;
fn test_generic<T:Clone>(expected: T, eq: compare<T>) {
let actual: T = { expected.clone() };
assert!((eq(expected, actual)));
}
fn test_vec() {
fn compare_vec(v1: Gc<int>, v2: Gc<int>) -> bool { return v1 == v2; }
test_generic::<Gc<int>>(box(GC) 1, compare_vec);
}
pub fn
|
() { test_vec(); }
|
main
|
identifier_name
|
expr-block-generic-box2.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::gc::{GC, Gc};
type compare<'a, T> = |T, T|: 'a -> bool;
fn test_generic<T:Clone>(expected: T, eq: compare<T>) {
let actual: T = { expected.clone() };
assert!((eq(expected, actual)));
}
fn test_vec() {
fn compare_vec(v1: Gc<int>, v2: Gc<int>) -> bool
|
test_generic::<Gc<int>>(box(GC) 1, compare_vec);
}
pub fn main() { test_vec(); }
|
{ return v1 == v2; }
|
identifier_body
|
tests.rs
|
extern crate word2vec;
use word2vec::wordvectors::WordVector;
const PATH: &'static str = "vectors.bin";
#[test]
fn
|
() {
let model = WordVector::load_from_binary(PATH).unwrap();
let res = model.cosine("winter", 10).expect("word not found in vocabulary");
assert_eq!(res.len(), 10);
let only_words: Vec<&str> = res.iter().map(|x| x.0.as_ref()).collect();
assert!(!only_words.contains(&"winter"))
}
#[test]
fn test_unexisting_word_cosine() {
let model = WordVector::load_from_binary(PATH).unwrap();
let result = model.cosine("somenotexistingword", 10);
match result {
Some(_) => assert!(false),
None => assert!(true),
}
}
#[test]
fn test_word_analogy() {
let model = WordVector::load_from_binary(PATH).unwrap();
let mut pos = Vec::new();
pos.push("woman");
pos.push("king");
let mut neg = Vec::new();
neg.push("man");
let res = model.analogy(pos, neg, 10).expect("couldn't find all of the given words");
assert_eq!(res.len(), 10);
let only_words: Vec<&str> = res.iter().map(|x| x.0.as_ref()).collect();
assert!(!only_words.contains(&"woman"));
assert!(!only_words.contains(&"king"));
assert!(!only_words.contains(&"man"));
}
#[test]
fn test_word_analogy_with_empty_params() {
let model = WordVector::load_from_binary(PATH).unwrap();
let result = model.analogy(Vec::new(), Vec::new(), 10);
match result {
Some(_) => assert!(false),
None => assert!(true),
}
}
#[test]
fn test_word_count_is_correctly_returned() {
let v = WordVector::load_from_binary(PATH).unwrap();
assert_eq!(v.word_count(), 71291);
}
|
test_word_cosine
|
identifier_name
|
tests.rs
|
extern crate word2vec;
use word2vec::wordvectors::WordVector;
const PATH: &'static str = "vectors.bin";
#[test]
fn test_word_cosine() {
let model = WordVector::load_from_binary(PATH).unwrap();
let res = model.cosine("winter", 10).expect("word not found in vocabulary");
assert_eq!(res.len(), 10);
let only_words: Vec<&str> = res.iter().map(|x| x.0.as_ref()).collect();
assert!(!only_words.contains(&"winter"))
}
#[test]
fn test_unexisting_word_cosine() {
let model = WordVector::load_from_binary(PATH).unwrap();
let result = model.cosine("somenotexistingword", 10);
match result {
Some(_) => assert!(false),
None => assert!(true),
}
}
#[test]
fn test_word_analogy() {
let model = WordVector::load_from_binary(PATH).unwrap();
let mut pos = Vec::new();
pos.push("woman");
pos.push("king");
let mut neg = Vec::new();
neg.push("man");
let res = model.analogy(pos, neg, 10).expect("couldn't find all of the given words");
assert_eq!(res.len(), 10);
let only_words: Vec<&str> = res.iter().map(|x| x.0.as_ref()).collect();
assert!(!only_words.contains(&"woman"));
assert!(!only_words.contains(&"king"));
assert!(!only_words.contains(&"man"));
}
#[test]
fn test_word_analogy_with_empty_params()
|
#[test]
fn test_word_count_is_correctly_returned() {
let v = WordVector::load_from_binary(PATH).unwrap();
assert_eq!(v.word_count(), 71291);
}
|
{
let model = WordVector::load_from_binary(PATH).unwrap();
let result = model.analogy(Vec::new(), Vec::new(), 10);
match result {
Some(_) => assert!(false),
None => assert!(true),
}
}
|
identifier_body
|
tests.rs
|
extern crate word2vec;
use word2vec::wordvectors::WordVector;
const PATH: &'static str = "vectors.bin";
#[test]
fn test_word_cosine() {
let model = WordVector::load_from_binary(PATH).unwrap();
let res = model.cosine("winter", 10).expect("word not found in vocabulary");
assert_eq!(res.len(), 10);
let only_words: Vec<&str> = res.iter().map(|x| x.0.as_ref()).collect();
assert!(!only_words.contains(&"winter"))
}
#[test]
fn test_unexisting_word_cosine() {
let model = WordVector::load_from_binary(PATH).unwrap();
let result = model.cosine("somenotexistingword", 10);
match result {
Some(_) => assert!(false),
None => assert!(true),
}
}
|
let model = WordVector::load_from_binary(PATH).unwrap();
let mut pos = Vec::new();
pos.push("woman");
pos.push("king");
let mut neg = Vec::new();
neg.push("man");
let res = model.analogy(pos, neg, 10).expect("couldn't find all of the given words");
assert_eq!(res.len(), 10);
let only_words: Vec<&str> = res.iter().map(|x| x.0.as_ref()).collect();
assert!(!only_words.contains(&"woman"));
assert!(!only_words.contains(&"king"));
assert!(!only_words.contains(&"man"));
}
#[test]
fn test_word_analogy_with_empty_params() {
let model = WordVector::load_from_binary(PATH).unwrap();
let result = model.analogy(Vec::new(), Vec::new(), 10);
match result {
Some(_) => assert!(false),
None => assert!(true),
}
}
#[test]
fn test_word_count_is_correctly_returned() {
let v = WordVector::load_from_binary(PATH).unwrap();
assert_eq!(v.word_count(), 71291);
}
|
#[test]
fn test_word_analogy() {
|
random_line_split
|
unix.rs
|
use std::io::Result;
use std::os::unix::io::RawFd;
use std::path::PathBuf;
pub fn before_exec() -> Result<()> {
use libc;
unsafe {
libc::setsid();
libc::ioctl(0, libc::TIOCSCTTY, 1);
}
Ok(())
}
pub fn fork() -> usize {
use libc;
unsafe { libc::fork() as usize }
}
pub fn set_winsize(fd: RawFd, row: u16, col: u16, xpixel: u16, ypixel: u16) {
use libc;
unsafe {
let size = libc::winsize {
ws_row: row,
ws_col: col,
ws_xpixel: xpixel,
ws_ypixel: ypixel,
};
libc::ioctl(fd, libc::TIOCSWINSZ, &size as *const libc::winsize);
}
}
pub fn getpty() -> (RawFd, PathBuf) {
use libc;
use std::ffi::CStr;
use std::fs::OpenOptions;
use std::io::Error;
use std::os::unix::io::IntoRawFd;
const TIOCPKT: libc::c_ulong = 0x5420;
extern "C" {
fn ptsname(fd: libc::c_int) -> *const libc::c_char;
fn grantpt(fd: libc::c_int) -> libc::c_int;
fn unlockpt(fd: libc::c_int) -> libc::c_int;
fn ioctl(fd: libc::c_int, request: libc::c_ulong, ...) -> libc::c_int;
}
let master_fd = OpenOptions::new()
.read(true)
.write(true)
.open("/dev/ptmx")
.unwrap()
.into_raw_fd();
unsafe {
let mut flag: libc::c_int = 1;
if ioctl(master_fd, TIOCPKT, &mut flag as *mut libc::c_int) < 0
|
if grantpt(master_fd) < 0 {
panic!("grantpt: {:?}", Error::last_os_error());
}
if unlockpt(master_fd) < 0 {
panic!("unlockpt: {:?}", Error::last_os_error());
}
}
let tty_path = unsafe {
PathBuf::from(
CStr::from_ptr(ptsname(master_fd))
.to_string_lossy()
.into_owned(),
)
};
(master_fd, tty_path)
}
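// Illustrative sketch only: one plausible way the helpers in this file fit
// together when preparing a pty for a child process. The 80x24 size and the
// printed message are example values, not taken from the original code.
#[cfg(test)]
#[allow(dead_code)]
fn pty_setup_example() {
    let (master_fd, tty_path) = getpty();
    // Give the slave terminal an initial window size; pixel sizes are unused.
    set_winsize(master_fd, 24, 80, 0, 0);
    println!("slave tty at {}", tty_path.display());
}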
|
{
panic!("ioctl: {:?}", Error::last_os_error());
}
|
conditional_block
|
unix.rs
|
use std::io::Result;
use std::os::unix::io::RawFd;
use std::path::PathBuf;
pub fn before_exec() -> Result<()> {
use libc;
unsafe {
libc::setsid();
libc::ioctl(0, libc::TIOCSCTTY, 1);
}
Ok(())
}
pub fn fork() -> usize {
use libc;
unsafe { libc::fork() as usize }
}
pub fn
|
(fd: RawFd, row: u16, col: u16, xpixel: u16, ypixel: u16) {
use libc;
unsafe {
let size = libc::winsize {
ws_row: row,
ws_col: col,
ws_xpixel: xpixel,
ws_ypixel: ypixel,
};
libc::ioctl(fd, libc::TIOCSWINSZ, &size as *const libc::winsize);
}
}
pub fn getpty() -> (RawFd, PathBuf) {
use libc;
use std::ffi::CStr;
use std::fs::OpenOptions;
use std::io::Error;
use std::os::unix::io::IntoRawFd;
const TIOCPKT: libc::c_ulong = 0x5420;
extern "C" {
fn ptsname(fd: libc::c_int) -> *const libc::c_char;
fn grantpt(fd: libc::c_int) -> libc::c_int;
fn unlockpt(fd: libc::c_int) -> libc::c_int;
fn ioctl(fd: libc::c_int, request: libc::c_ulong, ...) -> libc::c_int;
}
let master_fd = OpenOptions::new()
.read(true)
.write(true)
.open("/dev/ptmx")
.unwrap()
.into_raw_fd();
unsafe {
let mut flag: libc::c_int = 1;
if ioctl(master_fd, TIOCPKT, &mut flag as *mut libc::c_int) < 0 {
panic!("ioctl: {:?}", Error::last_os_error());
}
if grantpt(master_fd) < 0 {
panic!("grantpt: {:?}", Error::last_os_error());
}
if unlockpt(master_fd) < 0 {
panic!("unlockpt: {:?}", Error::last_os_error());
}
}
let tty_path = unsafe {
PathBuf::from(
CStr::from_ptr(ptsname(master_fd))
.to_string_lossy()
.into_owned(),
)
};
(master_fd, tty_path)
}
|
set_winsize
|
identifier_name
|
unix.rs
|
use std::io::Result;
use std::os::unix::io::RawFd;
use std::path::PathBuf;
pub fn before_exec() -> Result<()> {
use libc;
unsafe {
libc::setsid();
libc::ioctl(0, libc::TIOCSCTTY, 1);
}
Ok(())
}
pub fn fork() -> usize {
use libc;
unsafe { libc::fork() as usize }
}
pub fn set_winsize(fd: RawFd, row: u16, col: u16, xpixel: u16, ypixel: u16) {
use libc;
unsafe {
let size = libc::winsize {
ws_row: row,
ws_col: col,
ws_xpixel: xpixel,
ws_ypixel: ypixel,
};
libc::ioctl(fd, libc::TIOCSWINSZ, &size as *const libc::winsize);
}
}
pub fn getpty() -> (RawFd, PathBuf) {
use libc;
use std::ffi::CStr;
use std::fs::OpenOptions;
use std::io::Error;
use std::os::unix::io::IntoRawFd;
const TIOCPKT: libc::c_ulong = 0x5420;
extern "C" {
fn ptsname(fd: libc::c_int) -> *const libc::c_char;
fn grantpt(fd: libc::c_int) -> libc::c_int;
fn unlockpt(fd: libc::c_int) -> libc::c_int;
fn ioctl(fd: libc::c_int, request: libc::c_ulong, ...) -> libc::c_int;
}
let master_fd = OpenOptions::new()
.read(true)
.write(true)
.open("/dev/ptmx")
.unwrap()
.into_raw_fd();
unsafe {
let mut flag: libc::c_int = 1;
if ioctl(master_fd, TIOCPKT, &mut flag as *mut libc::c_int) < 0 {
panic!("ioctl: {:?}", Error::last_os_error());
|
if unlockpt(master_fd) < 0 {
panic!("unlockpt: {:?}", Error::last_os_error());
}
}
let tty_path = unsafe {
PathBuf::from(
CStr::from_ptr(ptsname(master_fd))
.to_string_lossy()
.into_owned(),
)
};
(master_fd, tty_path)
}
|
}
if grantpt(master_fd) < 0 {
panic!("grantpt: {:?}", Error::last_os_error());
}
|
random_line_split
|
unix.rs
|
use std::io::Result;
use std::os::unix::io::RawFd;
use std::path::PathBuf;
pub fn before_exec() -> Result<()> {
use libc;
unsafe {
libc::setsid();
libc::ioctl(0, libc::TIOCSCTTY, 1);
}
Ok(())
}
pub fn fork() -> usize {
use libc;
unsafe { libc::fork() as usize }
}
pub fn set_winsize(fd: RawFd, row: u16, col: u16, xpixel: u16, ypixel: u16)
|
pub fn getpty() -> (RawFd, PathBuf) {
use libc;
use std::ffi::CStr;
use std::fs::OpenOptions;
use std::io::Error;
use std::os::unix::io::IntoRawFd;
const TIOCPKT: libc::c_ulong = 0x5420;
extern "C" {
fn ptsname(fd: libc::c_int) -> *const libc::c_char;
fn grantpt(fd: libc::c_int) -> libc::c_int;
fn unlockpt(fd: libc::c_int) -> libc::c_int;
fn ioctl(fd: libc::c_int, request: libc::c_ulong, ...) -> libc::c_int;
}
let master_fd = OpenOptions::new()
.read(true)
.write(true)
.open("/dev/ptmx")
.unwrap()
.into_raw_fd();
unsafe {
let mut flag: libc::c_int = 1;
if ioctl(master_fd, TIOCPKT, &mut flag as *mut libc::c_int) < 0 {
panic!("ioctl: {:?}", Error::last_os_error());
}
if grantpt(master_fd) < 0 {
panic!("grantpt: {:?}", Error::last_os_error());
}
if unlockpt(master_fd) < 0 {
panic!("unlockpt: {:?}", Error::last_os_error());
}
}
let tty_path = unsafe {
PathBuf::from(
CStr::from_ptr(ptsname(master_fd))
.to_string_lossy()
.into_owned(),
)
};
(master_fd, tty_path)
}
|
{
use libc;
unsafe {
let size = libc::winsize {
ws_row: row,
ws_col: col,
ws_xpixel: xpixel,
ws_ypixel: ypixel,
};
libc::ioctl(fd, libc::TIOCSWINSZ, &size as *const libc::winsize);
}
}
|
identifier_body
|
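The unix.rs records above define four small pty helpers (getpty, set_winsize, fork, before_exec) but never show them used together. Below is a minimal, hedged sketch of how they might be wired up to run a child process on the pty; the dup2 calls that route the child's stdio onto the pty slave are an assumption of this sketch, not something taken from the records.
// Hedged usage sketch (assumes the `libc` crate plus the getpty, set_winsize,
// fork and before_exec functions shown in unix.rs above).
use std::fs::OpenOptions;
use std::os::unix::io::IntoRawFd;
fn spawn_child_on_pty() {
    let (master_fd, tty_path) = getpty();
    set_winsize(master_fd, 24, 80, 0, 0);
    if fork() == 0 {
        // Child: open the pty slave, make it stdin/stdout/stderr, then claim
        // it as the controlling terminal via before_exec().
        let slave_fd = OpenOptions::new()
            .read(true)
            .write(true)
            .open(&tty_path)
            .unwrap()
            .into_raw_fd();
        unsafe {
            libc::dup2(slave_fd, 0);
            libc::dup2(slave_fd, 1);
            libc::dup2(slave_fd, 2);
        }
        before_exec().unwrap();
        // exec the actual program here (e.g. via execvp).
    } else {
        // Parent: read and write master_fd to talk to the child.
    }
}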
main.rs
|
fn test1() {
let mut v = vec![100, 32, 57]; //vec! is a macro
for i in &mut v {
*i += 50;
}
println!("iterate v");
for i in &v {
println!("{}", i);
}
let mut v1 = Vec::new();
// v1.push(5.01); //type is inferred from the first value inserted
v1.push(1);
v1.push(3);
v1.push(5);
// v1.push(5.01); //compile error
println!("iterate v1");
for i in &v1 {
println!("{}", i);
}
let i1 = v1.get(1);
println!("v1[0] = {}", v1[0]);
}
fn test2() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s1_2 = process_string1(&s1);
println!("s1_2={}", s1_2);
println!("s1={}", s1);
let s2_2 = process_string2(s2);
println!("s2_2={}", s2_2);
// println!("s2={}", s2); //s2 has been moved, s2 is invalid here; value borrowed here after move
let s3 = String::from("head");
let s4 = String::from("shoulder");
let s5 = String::from("knee");
let s6 = String::from("toe");
let sbody = s3 + "_" + &s4 + "_" + &s5 + "_" + &s6 + "_" + "ears";
println!("sbody={}", sbody);
println!("s4={}", s4);
println!("s5={}", s5);
println!("s6={}", s6);
// println!("s3={}", s3); //s3 has been move before here, so s3 is invalid here; compile error
//
|
let str_hello_russia_slice1 = &str_hello_russia[0..4]; //string index?
println!(
"str_hello_russia={}, str_hello_russia_slice1={}",
str_hello_russia, str_hello_russia_slice1
);
let str_hello = "hello";
let str_hello_idx1 = &str_hello[0..1]; //[0, 1), i.e. index 0; strings can only be accessed by slice! I don't like this!
println!("str_hello_idx1={}", str_hello_idx1);
let a1 = [1, 2, 3, 5];
println!("a1[0]={}, a1.len={}", a1[0], a1.len());
println!("a1[last]={}", a1[a1.len() - 1]);
}
fn process_string1(s: &String) -> String {
let mut snew = String::new();
snew.push_str(s); //s is already a reference here
snew.push_str("_ending");
return snew;
}
fn process_string2(s: String) -> String {
let mut snew = String::new();
snew.push_str(&s); //push_str only accepts a reference as its param
snew.push_str("_ending");
return snew;
}
fn main() {
println!("main; -begin");
test2();
println!("main; -end");
}
|
let str_hello_russia = "Здравствуйте";
// let str_hello_russia_idx1 = &str_hello_russia[0]; //compile error
// println!("str_hello_russia_idx1={}", str_hello_russia_idx1);
|
random_line_split
|
main.rs
|
fn test1() {
let mut v = vec![100, 32, 57]; //vec! is a macro
for i in &mut v {
*i += 50;
}
println!("iterate v");
for i in &v {
println!("{}", i);
}
let mut v1 = Vec::new();
// v1.push(5.01); //type is inferred from the first value inserted
v1.push(1);
v1.push(3);
v1.push(5);
// v1.push(5.01); //compile error
println!("iterate v1");
for i in &v1 {
println!("{}", i);
}
let i1 = v1.get(1);
println!("v1[0] = {}", v1[0]);
}
fn test2()
|
println!("s6={}", s6);
// println!("s3={}", s3); //s3 has been move before here, so s3 is invalid here; compile error
//
let str_hello_russia = "Здравствуйте";
// let str_hello_russia_idx1 = &str_hello_russia[0]; //compile error
// println!("str_hello_russia_idx1={}", str_hello_russia_idx1);
let str_hello_russia_slice1 = &str_hello_russia[0..4]; //string index?
println!(
"str_hello_russia={}, str_hello_russia_slice1={}",
str_hello_russia, str_hello_russia_slice1
);
let str_hello = "hello";
let str_hello_idx1 = &str_hello[0..1]; //[0, 1), i.e. index 0; strings can only be accessed by slice! I don't like this!
println!("str_hello_idx1={}", str_hello_idx1);
let a1 = [1, 2, 3, 5];
println!("a1[0]={}, a1.len={}", a1[0], a1.len());
println!("a1[last]={}", a1[a1.len() - 1]);
}
fn process_string1(s: &String) -> String {
let mut snew = String::new();
snew.push_str(s); //s is already a reference here
snew.push_str("_ending");
return snew;
}
fn process_string2(s: String) -> String {
let mut snew = String::new();
snew.push_str(&s); //push_str only accepts a reference as its param
snew.push_str("_ending");
return snew;
}
fn main() {
println!("main; -begin");
test2();
println!("main; -end");
}
|
{
let s1 = String::from("tic");
let s2 = String::from("tac");
let s1_2 = process_string1(&s1);
println!("s1_2={}", s1_2);
println!("s1={}", s1);
let s2_2 = process_string2(s2);
println!("s2_2={}", s2_2);
// println!("s2={}", s2); //s2 has been moved, s2 is invalid here; value borrowed here after move
let s3 = String::from("head");
let s4 = String::from("shoulder");
let s5 = String::from("knee");
let s6 = String::from("toe");
let sbody = s3 + "_" + &s4 + "_" + &s5 + "_" + &s6 + "_" + "ears";
println!("sbody={}", sbody);
println!("s4={}", s4);
println!("s5={}", s5);
|
identifier_body
|
main.rs
|
fn test1() {
let mut v = vec![100, 32, 57]; //vec! is a macro
for i in &mut v {
*i += 50;
}
println!("iterate v");
for i in &v {
println!("{}", i);
}
let mut v1 = Vec::new();
// v1.push(5.01); //type is inferred from the first value inserted
v1.push(1);
v1.push(3);
v1.push(5);
// v1.push(5.01); //compile error
println!("iterate v1");
for i in &v1 {
println!("{}", i);
}
let i1 = v1.get(1);
println!("v1[0] = {}", v1[0]);
}
fn test2() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s1_2 = process_string1(&s1);
println!("s1_2={}", s1_2);
println!("s1={}", s1);
let s2_2 = process_string2(s2);
println!("s2_2={}", s2_2);
// println!("s2={}", s2); //s2 has been moved, s2 is invalid here; value borrowed here after move
let s3 = String::from("head");
let s4 = String::from("shoulder");
let s5 = String::from("knee");
let s6 = String::from("toe");
let sbody = s3 + "_" + &s4 + "_" + &s5 + "_" + &s6 + "_" + "ears";
println!("sbody={}", sbody);
println!("s4={}", s4);
println!("s5={}", s5);
println!("s6={}", s6);
// println!("s3={}", s3); //s3 has been move before here, so s3 is invalid here; compile error
//
let str_hello_russia = "Здравствуйте";
// let str_hello_russia_idx1 = &str_hello_russia[0]; //compile error
// println!("str_hello_russia_idx1={}", str_hello_russia_idx1);
let str_hello_russia_slice1 = &str_hello_russia[0..4]; //string index?
println!(
"str_hello_russia={}, str_hello_russia_slice1={}",
str_hello_russia, str_hello_russia_slice1
);
let str_hello = "hello";
let str_hello_idx1 = &str_hello[0..1]; //[0, 1), i.e. index 0; strings can only be accessed by slice! I don't like this!
println!("str_hello_idx1={}", str_hello_idx1);
let a1 = [1, 2, 3, 5];
println!("a1[0]={}, a1.len={}", a1[0], a1.len());
println!("a1[last]={}", a1[a1.len() - 1]);
}
fn process_string1(s: &String) -> String {
let mut snew = String::new();
snew.push_str(s); //s is already a reference here
snew.push_str("_ending");
return snew;
}
fn process_stri
|
-> String {
let mut snew = String::new();
snew.push_str(&s); //push_str only accepts a reference as its param
snew.push_str("_ending");
return snew;
}
fn main() {
println!("main; -begin");
test2();
println!("main; -end");
}
|
ng2(s: String)
|
identifier_name
|
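The main.rs comments above stumble over the fact that Rust indexes strings by byte rather than by character, which is why &str_hello_russia[0..4] is legal while [0] is not. A short, hedged illustration of that point (standard Rust, independent of the file above):
fn string_indexing_demo() {
    let s = "Здравствуйте";
    // Each Cyrillic letter occupies 2 bytes in UTF-8, so the string is
    // 12 chars but 24 bytes, and a 4-byte slice covers the first 2 letters.
    assert_eq!(s.chars().count(), 12);
    assert_eq!(s.len(), 24);
    assert_eq!(&s[0..4], "Зд");
    // Character-based access goes through the chars() iterator instead;
    // slicing off a char boundary (e.g. &s[0..1]) panics at runtime.
    assert_eq!(s.chars().next(), Some('З'));
}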
remote.rs
|
use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use std::str;
use git2;
use hex::ToHex;
use serde_json;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl, Progress};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
self.repo()?;
self.head.set(None);
*self.tree.borrow_mut() = None;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating", self.source_id.display_registry())?;
// git fetch origin master
let url = self.source_id.url();
let refspec = "refs/heads/master:refs/remotes/origin/master";
let repo = self.repo.borrow_mut().unwrap();
git::fetch(repo, url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open a read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename)
|
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.config.http()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
let mut pb = Progress::new("Fetch", self.config);
{
handle.progress(true)?;
let mut handle = handle.transfer();
handle.progress_function(|dl_total, dl_cur, _, _| {
pb.tick(dl_cur as usize, dl_total as usize).is_ok()
})?;
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
|
{
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
|
conditional_block
|
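The conditional_block extracted above is the cache check in RemoteRegistry::download: if a previously downloaded .crate file exists and is non-empty it is reused instead of being fetched again (a zero-length file signals an interrupted download). A hedged, std-only sketch of the same check, without Cargo's FileLock machinery:
use std::fs::File;
use std::io;
use std::path::Path;
// Return the cached file if it exists and is non-empty; None means the
// caller has to (re)download. Uses plain std::fs instead of Cargo's locks.
fn cached_crate(path: &Path) -> io::Result<Option<File>> {
    let file = match File::open(path) {
        Ok(f) => f,
        Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(e),
    };
    if file.metadata()?.len() > 0 {
        Ok(Some(file)) // finished download: reuse it
    } else {
        Ok(None) // zero length: interrupted download, fetch again
    }
}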
remote.rs
|
use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use std::str;
use git2;
use hex::ToHex;
use serde_json;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl, Progress};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct
|
<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
self.repo()?;
self.head.set(None);
*self.tree.borrow_mut() = None;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating", self.source_id.display_registry())?;
// git fetch origin master
let url = self.source_id.url();
let refspec = "refs/heads/master:refs/remotes/origin/master";
let repo = self.repo.borrow_mut().unwrap();
git::fetch(repo, url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open a read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.config.http()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
let mut pb = Progress::new("Fetch", self.config);
{
handle.progress(true)?;
let mut handle = handle.transfer();
handle.progress_function(|dl_total, dl_cur, _, _| {
pb.tick(dl_cur as usize, dl_total as usize).is_ok()
})?;
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
|
RemoteRegistry
|
identifier_name
|
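RemoteRegistry::repo() above follows an "open fast, then lock and open-or-init" pattern. Stripped of the index locking and the compatibility notes, its core can be hedged down to a few lines against the git2 crate:
use std::path::Path;
// Hedged sketch: try to open an existing repository, otherwise create one.
// The real code also takes an index lock and removes sibling files first.
fn open_or_init(path: &Path) -> Result<git2::Repository, git2::Error> {
    match git2::Repository::open(path) {
        Ok(repo) => Ok(repo),
        Err(_) => git2::Repository::init(path),
    }
}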
remote.rs
|
use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use std::str;
use git2;
use hex::ToHex;
use serde_json;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl, Progress};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
self.repo()?;
self.head.set(None);
*self.tree.borrow_mut() = None;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating", self.source_id.display_registry())?;
// git fetch origin master
let url = self.source_id.url();
let refspec = "refs/heads/master:refs/remotes/origin/master";
let repo = self.repo.borrow_mut().unwrap();
git::fetch(repo, url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock>
|
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.config.http()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
let mut pb = Progress::new("Fetch", self.config);
{
handle.progress(true)?;
let mut handle = handle.transfer();
handle.progress_function(|dl_total, dl_cur, _, _| {
pb.tick(dl_cur as usize, dl_total as usize).is_ok()
})?;
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
|
{
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open a read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
|
identifier_body
|
remote.rs
|
use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use std::str;
use git2;
use hex::ToHex;
use serde_json;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl, Progress};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
|
let entry = tree.get_path(path)?;
let object = entry.to_object(repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
self.repo()?;
self.head.set(None);
*self.tree.borrow_mut() = None;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating", self.source_id.display_registry())?;
// git fetch origin master
let url = self.source_id.url();
let refspec = "refs/heads/master:refs/remotes/origin/master";
let repo = self.repo.borrow_mut().unwrap();
git::fetch(repo, url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open a read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.config.http()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
let mut pb = Progress::new("Fetch", self.config);
{
handle.progress(true)?;
let mut handle = handle.transfer();
handle.progress_function(|dl_total, dl_cur, _, _| {
pb.tick(dl_cur as usize, dl_total as usize).is_ok()
})?;
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
|
random_line_split
|
|
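The download path above hashes the body as it streams in and then compares the hex digest against the checksum from the registry index. A hedged, stand-alone version of that verification step, using the external sha2 and hex crates rather than Cargo's internal util::Sha256:
use sha2::{Digest, Sha256};
// Assumes the sha2 and hex crates; expected_hex comes from the index entry.
fn verify_checksum(body: &[u8], expected_hex: &str) -> Result<(), String> {
    let digest = Sha256::digest(body);
    let actual = hex::encode(digest);
    if actual == expected_hex {
        Ok(())
    } else {
        Err(format!("checksum mismatch: got {}, expected {}", actual, expected_hex))
    }
}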
aesenc.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn aesenc_1() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM2)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 210], OperandSize::Dword)
}
fn aesenc_2() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexedDisplaced(EAX, EAX, Four, 939847747, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 132, 128, 67, 240, 4, 56], OperandSize::Dword)
}
fn aesenc_3() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 253], OperandSize::Qword)
}
fn
|
() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM3)), operand2: Some(IndirectDisplaced(RDI, 478839049, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 159, 9, 129, 138, 28], OperandSize::Qword)
}
|
aesenc_4
|
identifier_name
|
aesenc.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
|
}
fn aesenc_2() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexedDisplaced(EAX, EAX, Four, 939847747, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 132, 128, 67, 240, 4, 56], OperandSize::Dword)
}
fn aesenc_3() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 253], OperandSize::Qword)
}
fn aesenc_4() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM3)), operand2: Some(IndirectDisplaced(RDI, 478839049, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 159, 9, 129, 138, 28], OperandSize::Qword)
}
|
fn aesenc_1() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM2)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 210], OperandSize::Dword)
|
random_line_split
|
aesenc.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn aesenc_1() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM2)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 210], OperandSize::Dword)
}
fn aesenc_2() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexedDisplaced(EAX, EAX, Four, 939847747, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 132, 128, 67, 240, 4, 56], OperandSize::Dword)
}
fn aesenc_3() {
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 253], OperandSize::Qword)
}
fn aesenc_4()
|
{
run_test(&Instruction { mnemonic: Mnemonic::AESENC, operand1: Some(Direct(XMM3)), operand2: Some(IndirectDisplaced(RDI, 478839049, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 220, 159, 9, 129, 138, 28], OperandSize::Qword)
}
|
identifier_body
|
|
generic-function.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *t0
// gdb-check:$1 = 1
// gdb-command:print *t1
// gdb-check:$2 = 2.5
// gdb-command:print ret
// gdbg-check:$3 = {__0 = {__0 = 1, __1 = 2.5}, __1 = {__0 = 2.5, __1 = 1}}
// gdbr-check:$3 = ((1, 2.5), (2.5, 1))
// gdb-command:continue
// gdb-command:print *t0
// gdb-check:$4 = 3.5
// gdb-command:print *t1
// gdb-check:$5 = 4
// gdb-command:print ret
// gdbg-check:$6 = {__0 = {__0 = 3.5, __1 = 4}, __1 = {__0 = 4, __1 = 3.5}}
// gdbr-check:$6 = ((3.5, 4), (4, 3.5))
// gdb-command:continue
// gdb-command:print *t0
// gdb-check:$7 = 5
// gdb-command:print *t1
// gdbg-check:$8 = {a = 6, b = 7.5}
// gdbr-check:$8 = generic_function::Struct {a: 6, b: 7.5}
// gdb-command:print ret
// gdbg-check:$9 = {__0 = {__0 = 5, __1 = {a = 6, b = 7.5}}, __1 = {__0 = {a = 6, b = 7.5}, __1 = 5}}
// gdbr-check:$9 = ((5, generic_function::Struct {a: 6, b: 7.5}), (generic_function::Struct {a: 6, b: 7.5}, 5))
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print *t0
// lldbg-check:[...]$0 = 1
// lldbr-check:(i32) *t0 = 1
// lldb-command:print *t1
// lldbg-check:[...]$1 = 2.5
// lldbr-check:(f64) *t1 = 2.5
// lldb-command:print ret
// lldbg-check:[...]$2 = ((1, 2.5), (2.5, 1))
// lldbr-check:(((i32, f64), (f64, i32))) ret = { = { = 1 = 2.5 } = { = 2.5 = 1 } }
// lldb-command:continue
// lldb-command:print *t0
// lldbg-check:[...]$3 = 3.5
// lldbr-check:(f64) *t0 = 3.5
// lldb-command:print *t1
// lldbg-check:[...]$4 = 4
// lldbr-check:(u16) *t1 = 4
// lldb-command:print ret
// lldbg-check:[...]$5 = ((3.5, 4), (4, 3.5))
// lldbr-check:(((f64, u16), (u16, f64))) ret = { = { = 3.5 = 4 } = { = 4 = 3.5 } }
// lldb-command:continue
// lldb-command:print *t0
// lldbg-check:[...]$6 = 5
// lldbr-check:(i32) *t0 = 5
// lldb-command:print *t1
// lldbg-check:[...]$7 = Struct { a: 6, b: 7.5 }
// lldbr-check:(generic_function::Struct) *t1 = Struct { a: 6, b: 7.5 }
// lldb-command:print ret
// lldbg-check:[...]$8 = ((5, Struct { a: 6, b: 7.5 }), (Struct { a: 6, b: 7.5 }, 5))
|
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
#[derive(Clone)]
struct Struct {
a: isize,
b: f64
}
fn dup_tup<T0: Clone, T1: Clone>(t0: &T0, t1: &T1) -> ((T0, T1), (T1, T0)) {
let ret = ((t0.clone(), t1.clone()), (t1.clone(), t0.clone()));
zzz(); // #break
ret
}
fn main() {
let _ = dup_tup(&1, &2.5f64);
let _ = dup_tup(&3.5f64, &4_u16);
let _ = dup_tup(&5, &Struct { a: 6, b: 7.5 });
}
fn zzz() {()}
|
// lldbr-check:(((i32, generic_function::Struct), (generic_function::Struct, i32))) ret = { = { = 5 = Struct { a: 6, b: 7.5 } } = { = Struct { a: 6, b: 7.5 } = 5 } }
// lldb-command:continue
|
random_line_split
|
generic-function.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *t0
// gdb-check:$1 = 1
// gdb-command:print *t1
// gdb-check:$2 = 2.5
// gdb-command:print ret
// gdbg-check:$3 = {__0 = {__0 = 1, __1 = 2.5}, __1 = {__0 = 2.5, __1 = 1}}
// gdbr-check:$3 = ((1, 2.5), (2.5, 1))
// gdb-command:continue
// gdb-command:print *t0
// gdb-check:$4 = 3.5
// gdb-command:print *t1
// gdb-check:$5 = 4
// gdb-command:print ret
// gdbg-check:$6 = {__0 = {__0 = 3.5, __1 = 4}, __1 = {__0 = 4, __1 = 3.5}}
// gdbr-check:$6 = ((3.5, 4), (4, 3.5))
// gdb-command:continue
// gdb-command:print *t0
// gdb-check:$7 = 5
// gdb-command:print *t1
// gdbg-check:$8 = {a = 6, b = 7.5}
// gdbr-check:$8 = generic_function::Struct {a: 6, b: 7.5}
// gdb-command:print ret
// gdbg-check:$9 = {__0 = {__0 = 5, __1 = {a = 6, b = 7.5}}, __1 = {__0 = {a = 6, b = 7.5}, __1 = 5}}
// gdbr-check:$9 = ((5, generic_function::Struct {a: 6, b: 7.5}), (generic_function::Struct {a: 6, b: 7.5}, 5))
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print *t0
// lldbg-check:[...]$0 = 1
// lldbr-check:(i32) *t0 = 1
// lldb-command:print *t1
// lldbg-check:[...]$1 = 2.5
// lldbr-check:(f64) *t1 = 2.5
// lldb-command:print ret
// lldbg-check:[...]$2 = ((1, 2.5), (2.5, 1))
// lldbr-check:(((i32, f64), (f64, i32))) ret = { = { = 1 = 2.5 } = { = 2.5 = 1 } }
// lldb-command:continue
// lldb-command:print *t0
// lldbg-check:[...]$3 = 3.5
// lldbr-check:(f64) *t0 = 3.5
// lldb-command:print *t1
// lldbg-check:[...]$4 = 4
// lldbr-check:(u16) *t1 = 4
// lldb-command:print ret
// lldbg-check:[...]$5 = ((3.5, 4), (4, 3.5))
// lldbr-check:(((f64, u16), (u16, f64))) ret = { = { = 3.5 = 4 } = { = 4 = 3.5 } }
// lldb-command:continue
// lldb-command:print *t0
// lldbg-check:[...]$6 = 5
// lldbr-check:(i32) *t0 = 5
// lldb-command:print *t1
// lldbg-check:[...]$7 = Struct { a: 6, b: 7.5 }
// lldbr-check:(generic_function::Struct) *t1 = Struct { a: 6, b: 7.5 }
// lldb-command:print ret
// lldbg-check:[...]$8 = ((5, Struct { a: 6, b: 7.5 }), (Struct { a: 6, b: 7.5 }, 5))
// lldbr-check:(((i32, generic_function::Struct), (generic_function::Struct, i32))) ret = { = { = 5 = Struct { a: 6, b: 7.5 } } = { = Struct { a: 6, b: 7.5 } = 5 } }
// lldb-command:continue
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
#[derive(Clone)]
struct Struct {
a: isize,
b: f64
}
fn dup_tup<T0: Clone, T1: Clone>(t0: &T0, t1: &T1) -> ((T0, T1), (T1, T0)) {
let ret = ((t0.clone(), t1.clone()), (t1.clone(), t0.clone()));
zzz(); // #break
ret
}
fn
|
() {
let _ = dup_tup(&1, &2.5f64);
let _ = dup_tup(&3.5f64, &4_u16);
let _ = dup_tup(&5, &Struct { a: 6, b: 7.5 });
}
fn zzz() {()}
|
main
|
identifier_name
|
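The generic-function.rs test above drives a debugger over three monomorphizations of dup_tup. As a plain-Rust sanity check of the values the gdb/lldb expressions expect (an illustration only, not part of the test suite):
fn dup_tup<T0: Clone, T1: Clone>(t0: &T0, t1: &T1) -> ((T0, T1), (T1, T0)) {
    ((t0.clone(), t1.clone()), (t1.clone(), t0.clone()))
}
fn main() {
    // Mirrors the first two debugger checks; Struct has no PartialEq in the
    // original test, so the third call is left out here.
    assert_eq!(dup_tup(&1, &2.5f64), ((1, 2.5), (2.5, 1)));
    assert_eq!(dup_tup(&3.5f64, &4_u16), ((3.5, 4), (4, 3.5)));
}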
debug.rs
|
// Copyright 2019 Jeremy Wall
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use super::Composite;
use super::Primitive;
use super::Value;
use Composite::{List, Tuple};
use Primitive::{Bool, Empty, Float, Int, Str};
use Value::{C, F, M, P, S, T};
impl fmt::Debug for Value {
fn
|
(&self, w: &mut fmt::Formatter) -> fmt::Result {
match self {
P(Bool(v)) => write!(w, "Bool({})", v),
P(Int(v)) => write!(w, "Int({})", v),
P(Float(v)) => write!(w, "Float({})", v),
P(Str(v)) => write!(w, "String({})", v),
P(Empty) => write!(w, "NULL"),
C(List(ref els, _)) => {
write!(w, "List[")?;
for e in els {
write!(w, "{:?},", e)?;
}
write!(w, "]")
}
C(Tuple(ref flds, _)) => {
write!(w, "Tuple(")?;
for (k, v) in flds {
write!(w, "\"{}\"={:?},", k, v)?;
}
write!(w, ")")
}
F(_) => write!(w, "<Func>"),
M(_) => write!(w, "<Module>"),
T(_) => write!(w, "<Expression>"),
S(v) => write!(w, "Symbol({})", v),
}
}
}
|
fmt
|
identifier_name
|
debug.rs
|
// Copyright 2019 Jeremy Wall
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use super::Composite;
use super::Primitive;
|
use Primitive::{Bool, Empty, Float, Int, Str};
use Value::{C, F, M, P, S, T};
impl fmt::Debug for Value {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
match self {
P(Bool(v)) => write!(w, "Bool({})", v),
P(Int(v)) => write!(w, "Int({})", v),
P(Float(v)) => write!(w, "Float({})", v),
P(Str(v)) => write!(w, "String({})", v),
P(Empty) => write!(w, "NULL"),
C(List(ref els, _)) => {
write!(w, "List[")?;
for e in els {
write!(w, "{:?},", e)?;
}
write!(w, "]")
}
C(Tuple(ref flds, _)) => {
write!(w, "Tuple(")?;
for (k, v) in flds {
write!(w, "\"{}\"={:?},", k, v)?;
}
write!(w, ")")
}
F(_) => write!(w, "<Func>"),
M(_) => write!(w, "<Module>"),
T(_) => write!(w, "<Expression>"),
S(v) => write!(w, "Symbol({})", v),
}
}
}
|
use super::Value;
use Composite::{List, Tuple};
|
random_line_split
|
debug.rs
|
// Copyright 2019 Jeremy Wall
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use super::Composite;
use super::Primitive;
use super::Value;
use Composite::{List, Tuple};
use Primitive::{Bool, Empty, Float, Int, Str};
use Value::{C, F, M, P, S, T};
impl fmt::Debug for Value {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result
|
}
F(_) => write!(w, "<Func>"),
M(_) => write!(w, "<Module>"),
T(_) => write!(w, "<Expression>"),
S(v) => write!(w, "Symbol({})", v),
}
}
}
|
{
match self {
P(Bool(v)) => write!(w, "Bool({})", v),
P(Int(v)) => write!(w, "Int({})", v),
P(Float(v)) => write!(w, "Float({})", v),
P(Str(v)) => write!(w, "String({})", v),
P(Empty) => write!(w, "NULL"),
C(List(ref els, _)) => {
write!(w, "List[")?;
for e in els {
write!(w, "{:?},", e)?;
}
write!(w, "]")
}
C(Tuple(ref flds, _)) => {
write!(w, "Tuple(")?;
for (k, v) in flds {
write!(w, "\"{}\"={:?},", k, v)?;
}
write!(w, ")")
|
identifier_body
|
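The debug.rs records above hand-roll fmt::Debug for ucg's Value type. Below is a self-contained miniature of the same pattern, with hypothetical types invented purely to show the kind of output the List arm produces (note the trailing comma):
use std::fmt;
enum Val {
    Int(i64),
    List(Vec<Val>),
}
impl fmt::Debug for Val {
    fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Val::Int(v) => write!(w, "Int({})", v),
            Val::List(els) => {
                write!(w, "List[")?;
                for e in els {
                    write!(w, "{:?},", e)?;
                }
                write!(w, "]")
            }
        }
    }
}
fn main() {
    let v = Val::List(vec![Val::Int(1), Val::Int(2)]);
    println!("{:?}", v); // prints: List[Int(1),Int(2),]
}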
nl.rs
|
impl_trait!(
/// Trait marking constants valid for use in `Nlmsghdr.nl_type`
NlType,
u16
);
impl_var_trait!(
/// Values for `nl_type` in `Nlmsghdr`
Nlmsg, u16, NlType,
Noop => libc::NLMSG_NOOP as u16,
Error => libc::NLMSG_ERROR as u16,
Done => libc::NLMSG_DONE as u16,
Overrun => libc::NLMSG_OVERRUN as u16
);
impl_var_trait!(
/// Values for `nl_type` in `Nlmsghdr`
GenlId, u16, NlType,
Ctrl => libc::GENL_ID_CTRL as u16,
#[cfg(target_env="gnu")]
VfsDquot => libc::GENL_ID_VFS_DQUOT as u16,
#[cfg(target_env="gnu")]
Pmcraid => libc::GENL_ID_PMCRAID as u16
);
impl_var_trait!(
/// rtnetlink-related values for `nl_type` in `Nlmsghdr`
Rtm, u16, NlType,
Newlink => libc::RTM_NEWLINK,
Dellink => libc::RTM_DELLINK,
Getlink => libc::RTM_GETLINK,
Setlink => libc::RTM_SETLINK,
Newaddr => libc::RTM_NEWADDR,
Deladdr => libc::RTM_DELADDR,
Getaddr => libc::RTM_GETADDR,
Newroute => libc::RTM_NEWROUTE,
Delroute => libc::RTM_DELROUTE,
Getroute => libc::RTM_GETROUTE,
Newneigh => libc::RTM_NEWNEIGH,
Delneigh => libc::RTM_DELNEIGH,
|
Getrule => libc::RTM_GETRULE,
Newqdisc => libc::RTM_NEWQDISC,
Delqdisc => libc::RTM_DELQDISC,
Getqdisc => libc::RTM_GETQDISC,
Newtclass => libc::RTM_NEWTCLASS,
Deltclass => libc::RTM_DELTCLASS,
Gettclass => libc::RTM_GETTCLASS,
Newtfilter => libc::RTM_NEWTFILTER,
Deltfilter => libc::RTM_DELTFILTER,
Gettfilter => libc::RTM_GETTFILTER,
Newaction => libc::RTM_NEWACTION,
Delaction => libc::RTM_DELACTION,
Getaction => libc::RTM_GETACTION,
Newprefix => libc::RTM_NEWPREFIX,
Getmulticast => libc::RTM_GETMULTICAST,
Getanycast => libc::RTM_GETANYCAST,
Newneightbl => libc::RTM_NEWNEIGHTBL,
Getneightbl => libc::RTM_GETNEIGHTBL,
Setneightbl => libc::RTM_SETNEIGHTBL,
Newnduseropt => libc::RTM_NEWNDUSEROPT,
Newaddrlabel => libc::RTM_NEWADDRLABEL,
Deladdrlabel => libc::RTM_DELADDRLABEL,
Getaddrlabel => libc::RTM_GETADDRLABEL,
Getdcb => libc::RTM_GETDCB,
Setdcb => libc::RTM_SETDCB,
Newnetconf => libc::RTM_NEWNETCONF,
Getnetconf => libc::RTM_GETNETCONF,
Newmdb => libc::RTM_NEWMDB,
Delmdb => libc::RTM_DELMDB,
Getmdb => libc::RTM_GETMDB,
Newnsid => libc::RTM_NEWNSID,
Delnsid => libc::RTM_DELNSID,
Getnsid => libc::RTM_GETNSID
);
impl_var!(
/// Values for `nl_flags` in `Nlmsghdr`
NlmF, u16,
Request => libc::NLM_F_REQUEST as u16,
Multi => libc::NLM_F_MULTI as u16,
Ack => libc::NLM_F_ACK as u16,
Echo => libc::NLM_F_ECHO as u16,
DumpIntr => libc::NLM_F_DUMP_INTR as u16,
DumpFiltered => libc::NLM_F_DUMP_FILTERED as u16,
Root => libc::NLM_F_ROOT as u16,
Match => libc::NLM_F_MATCH as u16,
Atomic => libc::NLM_F_ATOMIC as u16,
Dump => libc::NLM_F_DUMP as u16,
Replace => libc::NLM_F_REPLACE as u16,
Excl => libc::NLM_F_EXCL as u16,
Create => libc::NLM_F_CREATE as u16,
Append => libc::NLM_F_APPEND as u16
);
|
Getneigh => libc::RTM_GETNEIGH,
Newrule => libc::RTM_NEWRULE,
Delrule => libc::RTM_DELRULE,
|
random_line_split
|
moves.rs
|
only the outermost expression that is needed. The
borrow checker and trans, for example, only care about the outermost
expressions that are moved. It is more efficient therefore just to
store those entries.
Sometimes though we want to know the variables that are moved (in
particular in the borrow checker). For these cases, the set
`moved_variables_set` just collects the ids of variables that are
moved.
Finally, the `capture_map` maps from the node_id of a closure
expression to an array of `CaptureVar` structs detailing which
variables are captured and how (by ref, by copy, by move).
## Enforcement of Moves
The enforcement of moves is done by the borrow checker. Please see
the section "Moves and initialization" in `middle/borrowck/doc.rs`.
## Distributive property
Copies are "distributive" over parenthesization, but blocks are
considered rvalues. What this means is that, for example, neither
`a.clone()` nor `(a).clone()` will move `a` (presuming that `a` has a
linear type and `clone()` takes its self by reference), but
`{a}.clone()` will move `a`, as would `(if cond {a} else {b}).clone()`
and so on.
*/
use middle::pat_util::{pat_bindings};
use middle::freevars;
use middle::ty;
use middle::typeck::{method_map};
use util::ppaux;
use util::ppaux::Repr;
use util::common::indenter;
use std::at_vec;
use std::hashmap::{HashSet, HashMap};
use syntax::ast::*;
use syntax::ast_util;
use syntax::visit;
use syntax::visit::vt;
use syntax::codemap::span;
#[deriving(Encodable, Decodable)]
pub enum CaptureMode {
CapCopy, // Copy the value into the closure.
CapMove, // Move the value into the closure.
CapRef, // Reference directly from parent stack frame (used by `&fn()`).
}
#[deriving(Encodable, Decodable)]
pub struct CaptureVar {
def: def, // Variable being accessed free
span: span, // Location of an access to this variable
mode: CaptureMode // How variable is being accessed
}
pub type CaptureMap = @mut HashMap<node_id, @[CaptureVar]>;
pub type MovesMap = @mut HashSet<node_id>;
/**
* Set of variable node-ids that are moved.
*
* Note: The `VariableMovesMap` stores expression ids that
* are moves, whereas this set stores the ids of the variables
* that are moved at some point */
pub type MovedVariablesSet = @mut HashSet<node_id>;
/** See the section Output on the module comment for explanation. */
pub struct MoveMaps {
moves_map: MovesMap,
moved_variables_set: MovedVariablesSet,
capture_map: CaptureMap
}
struct VisitContext {
tcx: ty::ctxt,
method_map: method_map,
move_maps: MoveMaps
}
#[deriving(Eq)]
enum UseMode {
Move, // This value or something owned by it is moved.
Read // Read no matter what the type.
}
pub fn compute_moves(tcx: ty::ctxt,
method_map: method_map,
crate: &crate) -> MoveMaps
{
let visitor = visit::mk_vt(@visit::Visitor {
visit_fn: compute_modes_for_fn,
visit_expr: compute_modes_for_expr,
visit_local: compute_modes_for_local,
.. *visit::default_visitor()
});
let visit_cx = VisitContext {
tcx: tcx,
method_map: method_map,
move_maps: MoveMaps {
moves_map: @mut HashSet::new(),
capture_map: @mut HashMap::new(),
moved_variables_set: @mut HashSet::new()
}
};
visit::visit_crate(crate, (visit_cx, visitor));
return visit_cx.move_maps;
}
pub fn moved_variable_node_id_from_def(def: def) -> Option<node_id> {
match def {
def_binding(nid, _) |
def_arg(nid, _) |
def_local(nid, _) |
def_self(nid, _) => Some(nid),
_ => None
}
}
///////////////////////////////////////////////////////////////////////////
// Expressions
fn compute_modes_for_local<'a>(local: @local,
(cx, v): (VisitContext,
vt<VisitContext>)) {
cx.use_pat(local.node.pat);
for local.node.init.iter().advance |&init| {
cx.use_expr(init, Read, v);
}
}
fn compute_modes_for_fn(fk: &visit::fn_kind,
decl: &fn_decl,
body: &blk,
span: span,
id: node_id,
(cx, v): (VisitContext,
vt<VisitContext>)) {
for decl.inputs.iter().advance |a| {
cx.use_pat(a.pat);
}
visit::visit_fn(fk, decl, body, span, id, (cx, v));
}
fn compute_modes_for_expr(expr: @expr,
(cx, v): (VisitContext,
vt<VisitContext>))
{
cx.consume_expr(expr, v);
}
impl VisitContext {
pub fn consume_exprs(&self, exprs: &[@expr], visitor: vt<VisitContext>) {
for exprs.iter().advance |expr| {
self.consume_expr(*expr, visitor);
}
}
pub fn consume_expr(&self, expr: @expr, visitor: vt<VisitContext>) {
/*!
* Indicates that the value of `expr` will be consumed,
* meaning either copied or moved depending on its type.
*/
debug!("consume_expr(expr=%s)",
expr.repr(self.tcx));
let expr_ty = ty::expr_ty_adjusted(self.tcx, expr);
if ty::type_moves_by_default(self.tcx, expr_ty) {
self.move_maps.moves_map.insert(expr.id);
self.use_expr(expr, Move, visitor);
} else {
self.use_expr(expr, Read, visitor);
};
}
pub fn consume_block(&self, blk: &blk, visitor: vt<VisitContext>) {
/*!
* Indicates that the value of `blk` will be consumed,
* meaning either copied or moved depending on its type.
*/
debug!("consume_block(blk.id=%?)", blk.id);
for blk.stmts.iter().advance |stmt| {
(visitor.visit_stmt)(*stmt, (*self, visitor));
}
for blk.expr.iter().advance |tail_expr| {
self.consume_expr(*tail_expr, visitor);
}
}
pub fn use_expr(&self,
expr: @expr,
expr_mode: UseMode,
visitor: vt<VisitContext>) {
/*!
* Indicates that `expr` is used with a given mode. This will
* in turn trigger calls to the subcomponents of `expr`.
*/
debug!("use_expr(expr=%s, mode=%?)",
expr.repr(self.tcx),
expr_mode);
// `expr_mode` refers to the post-adjustment value. If one of
// those adjustments is to take a reference, then it's only
// reading the underlying expression, not moving it.
let comp_mode = match self.tcx.adjustments.find(&expr.id) {
Some(&@ty::AutoDerefRef(
ty::AutoDerefRef {
autoref: Some(_), _})) => Read,
_ => expr_mode
};
debug!("comp_mode = %?", comp_mode);
match expr.node {
expr_path(*) | expr_self => {
match comp_mode {
Move => {
let def = self.tcx.def_map.get_copy(&expr.id);
let r = moved_variable_node_id_from_def(def);
for r.iter().advance |&id| {
self.move_maps.moved_variables_set.insert(id);
}
}
Read => {}
}
}
expr_unary(_, deref, base) => { // *base
if !self.use_overloaded_operator(
expr, base, [], visitor)
{
// Moving out of *base moves out of base.
self.use_expr(base, comp_mode, visitor);
}
}
expr_field(base, _, _) => { // base.f
// Moving out of base.f moves out of base.
self.use_expr(base, comp_mode, visitor);
}
expr_index(_, lhs, rhs) => { // lhs[rhs]
if !self.use_overloaded_operator(
expr, lhs, [rhs], visitor)
{
self.use_expr(lhs, comp_mode, visitor);
self.consume_expr(rhs, visitor);
}
}
expr_call(callee, ref args, _) => { // callee(args)
// Figure out whether the called function is consumed.
let mode = match ty::get(ty::expr_ty(self.tcx, callee)).sty {
ty::ty_closure(ref cty) => {
match cty.onceness {
Once => Move,
Many => Read,
}
},
ty::ty_bare_fn(*) => Read,
ref x =>
self.tcx.sess.span_bug(callee.span,
fmt!("non-function type in moves for expr_call: %?", x)),
};
// Note we're not using consume_expr, which uses type_moves_by_default
// to determine the mode, for this. The reason is that while stack
// closures should be noncopyable, they shouldn't move by default;
// calling a closure should only consume it if it's once.
if mode == Move {
self.move_maps.moves_map.insert(callee.id);
}
self.use_expr(callee, mode, visitor);
self.use_fn_args(callee.id, *args, visitor);
}
expr_method_call(callee_id, rcvr, _, _, ref args, _) => { // callee.m(args)
// Implicit self is equivalent to & mode, but every
// other kind should be + mode.
self.use_receiver(rcvr, visitor);
self.use_fn_args(callee_id, *args, visitor);
}
expr_struct(_, ref fields, opt_with) => {
for fields.iter().advance |field| {
self.consume_expr(field.node.expr, visitor);
}
for opt_with.iter().advance |with_expr| {
// If there are any fields whose type is move-by-default,
// then `with` is consumed, otherwise it is only read
let with_ty = ty::expr_ty(self.tcx, *with_expr);
let with_fields = match ty::get(with_ty).sty {
ty::ty_struct(did, ref substs) => {
ty::struct_fields(self.tcx, did, substs)
}
ref r => {
self.tcx.sess.span_bug(
with_expr.span,
fmt!("bad base expr type in record: %?", r))
}
};
// The `with` expr must be consumed if it contains
// any fields which (1) were not explicitly
// specified and (2) have a type that
// moves-by-default:
let consume_with = with_fields.iter().any(|tf| {
!fields.iter().any(|f| f.node.ident == tf.ident) &&
ty::type_moves_by_default(self.tcx, tf.mt.ty)
});
if consume_with {
self.consume_expr(*with_expr, visitor);
} else
|
}
}
expr_tup(ref exprs) => {
self.consume_exprs(*exprs, visitor);
}
expr_if(cond_expr, ref then_blk, opt_else_expr) => {
self.consume_expr(cond_expr, visitor);
self.consume_block(then_blk, visitor);
for opt_else_expr.iter().advance |else_expr| {
self.consume_expr(*else_expr, visitor);
}
}
expr_match(discr, ref arms) => {
// We must do this first so that `arms_have_by_move_bindings`
// below knows which bindings are moves.
for arms.iter().advance |arm| {
self.consume_arm(arm, visitor);
}
// The discriminant may, in fact, be partially moved
// if there are by-move bindings, but borrowck deals
// with that itself.
self.use_expr(discr, Read, visitor);
}
expr_copy(base) => {
self.use_expr(base, Read, visitor);
}
expr_paren(base) => {
// Note: base is not considered a *component* here, so
// use `expr_mode` not `comp_mode`.
self.use_expr(base, expr_mode, visitor);
}
expr_vec(ref exprs, _) => {
self.consume_exprs(*exprs, visitor);
}
expr_addr_of(_, base) => { // &base
self.use_expr(base, Read, visitor);
}
expr_inline_asm(*) |
expr_break(*) |
expr_again(*) |
expr_lit(*) => {}
expr_loop(ref blk, _) => {
self.consume_block(blk, visitor);
}
expr_log(a_expr, b_expr) => {
self.consume_expr(a_expr, visitor);
self.use_expr(b_expr, Read, visitor);
}
expr_while(cond_expr, ref blk) => {
self.consume_expr(cond_expr, visitor);
self.consume_block(blk, visitor);
}
expr_unary(_, _, lhs) => {
if !self.use_overloaded_operator(
expr, lhs, [], visitor)
{
self.consume_expr(lhs, visitor);
}
}
expr_binary(_, _, lhs, rhs) => {
if !self.use_overloaded_operator(
expr, lhs, [rhs], visitor)
{
self.consume_expr(lhs, visitor);
self.consume_expr(rhs, visitor);
}
}
expr_block(ref blk) => {
self.consume_block(blk, visitor);
}
expr_ret(ref opt_expr) => {
for opt_expr.iter().advance |expr| {
self.consume_expr(*expr, visitor);
}
}
expr_assign(lhs, rhs) => {
self.use_expr(lhs, Read, visitor);
self.consume_expr(rhs, visitor);
}
expr_cast(base, _) => {
self.consume_expr(base, visitor);
}
expr_assign_op(_, _, lhs, rhs) => {
// FIXME(#4712) --- Overloaded operators?
//
// if !self.use_overloaded_operator(
// expr, DoDerefArgs, lhs, [rhs], visitor)
// {
self.consume_expr(lhs, visitor);
self.consume_expr(rhs, visitor);
// }
}
expr_repeat(base, count, _) => {
self.consume_expr(base, visitor);
self.consume_expr(count, visitor);
}
expr_loop_body(base) |
expr_do_body(base) => {
self.use_expr(base, comp_mode, visitor);
}
expr_fn_block(ref decl, ref body) => {
for decl.inputs.iter().advance |a| {
self.use_pat(a.pat);
}
let cap_vars = self.compute_captures(expr.id);
self.move_maps.capture_map.insert(expr.id, cap_vars);
self.consume_block(body, visitor);
}
expr_vstore(base, _) => {
self.use_expr(base, comp_mode, visitor);
}
expr_mac(*) => {
self.tcx.sess.span_bug(
|
{
self.use_expr(*with_expr, Read, visitor);
}
|
conditional_block
|
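The moves.rs module comment above notes that copies are "distributive" over parenthesization while blocks are treated as rvalues. The same distinction still holds in current Rust; a small stand-alone illustration in modern syntax (not the 2013-era dialect of the row itself):

fn main() {
    let a = String::from("hello");
    let _x = (a).clone(); // method call auto-borrows: `a` is not moved
    println!("{}", a);    // still usable

    let b = String::from("world");
    let _y = { b }.clone(); // the block takes `b` by value, so `b` is moved
    // println!("{}", b);   // would not compile: value moved into the block
}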