| column | type | stats |
| --- | --- | --- |
| file_name | large_string | lengths 4 – 69 |
| prefix | large_string | lengths 0 – 26.7k |
| suffix | large_string | lengths 0 – 24.8k |
| middle | large_string | lengths 0 – 2.12k |
| fim_type | large_string | 4 classes |
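Each record splits a single source file into three string fields — `prefix`, `middle`, and `suffix` — with `fim_type` recording how the split was made (`random_line_split`, `identifier_name`, `identifier_body`, and one further class not shown in this excerpt). Concatenating the three fields in order recovers the original file; a minimal sketch of that reassembly (field names taken from the schema above, not part of any dataset tooling):

```rust
/// Rebuild the original source file from one FIM record.
/// The dataset stores `prefix`, `middle`, and `suffix` as plain strings,
/// so reconstruction is straight concatenation in that order.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut file = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    file.push_str(prefix);
    file.push_str(middle);
    file.push_str(suffix);
    file
}
```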
mod.rs
use crate::rendering::*; use crate::geometry::*; /// IMPORTANT: Must form a star domain at its center #[derive(Clone, Debug)] pub struct Polygon { pub corners: Vec<Point>, /// defined anti-clockwise pub center: Point, pub rot: Rotation, /// anti-clockwise angle w.r.t. positive z-axis pub pos: Point3, pub color: Color, pub fixed: bool } impl Polygon { pub fn new_regular( corners: Vec<Point>, center: Point, pos: Point3, color: Color, fixed: bool ) -> Polygon { Polygon { corners, center, rot: Rotation::new(0.0), pos, color, fixed, } } pub fn get_vertices(self) -> Vec<PolygonVertex> { let mut output: Vec<PolygonVertex> = vec![]; let corners_it_shift = self.corners.clone().into_iter().cycle().skip(1); for (corner1, corner2) in self.corners.into_iter().zip(corners_it_shift) { output.push(PolygonVertex { corner1: corner1.into(), corner2: corner2.into(), center: self.center.into(), rot: self.rot.get_matrix_f32(), pos: self.pos.into(), color: self.color.get_array_f32(), fixed_pos: self.fixed as u32 }); } output } } impl GliumStandardPrimitive for Polygon { type Vertex = PolygonVertex; fn get_shaders() -> Shaders { Shaders::VertexGeometryFragment( include_str!("polygon.vs"),
) } fn get_vertex(self) -> Vec<Self::Vertex> { self.get_vertices() } } #[derive(Copy, Clone, Debug)] pub struct PolygonVertex { pub corner1: [f32; 2], pub corner2: [f32; 2], pub center: [f32; 2], pub rot: [[f32; 2]; 2], pub pos: [f32; 3], pub color: [f32; 4], pub fixed_pos: u32 } implement_vertex!(PolygonVertex, corner1, corner2, center, rot, pos, color, fixed_pos);
include_str!("polygon.ges"), include_str!("polygon.fs")
random_line_split
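The `get_vertices` method above pairs every corner with its cyclic successor by zipping the corner list against a copy of itself shifted by one (`cycle().skip(1)`), emitting one `PolygonVertex` per edge. A self-contained sketch of that pairing trick, using bare `(f32, f32)` tuples in place of the crate's `Point` type:

```rust
/// Pair each corner with the next one, wrapping around at the end —
/// the same cycle-and-skip idiom used by `Polygon::get_vertices`.
fn edge_pairs(corners: &[(f32, f32)]) -> Vec<((f32, f32), (f32, f32))> {
    corners
        .iter()
        .copied()
        .zip(corners.iter().copied().cycle().skip(1))
        .collect()
}

fn main() {
    let triangle = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)];
    // Prints (c0, c1), (c1, c2), (c2, c0): the three edges.
    for (a, b) in edge_pairs(&triangle) {
        println!("{:?} -> {:?}", a, b);
    }
}
```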
mod.rs
use crate::rendering::*; use crate::geometry::*; /// IMPORTANT: Must form a star domain at its center #[derive(Clone, Debug)] pub struct Polygon { pub corners: Vec<Point>, /// defined anti-clockwise pub center: Point, pub rot: Rotation, /// anti-clockwise angle w.r.t. positive z-axis pub pos: Point3, pub color: Color, pub fixed: bool } impl Polygon { pub fn new_regular( corners: Vec<Point>, center: Point, pos: Point3, color: Color, fixed: bool ) -> Polygon { Polygon { corners, center, rot: Rotation::new(0.0), pos, color, fixed, } } pub fn get_vertices(self) -> Vec<PolygonVertex> { let mut output: Vec<PolygonVertex> = vec![]; let corners_it_shift = self.corners.clone().into_iter().cycle().skip(1); for (corner1, corner2) in self.corners.into_iter().zip(corners_it_shift) { output.push(PolygonVertex { corner1: corner1.into(), corner2: corner2.into(), center: self.center.into(), rot: self.rot.get_matrix_f32(), pos: self.pos.into(), color: self.color.get_array_f32(), fixed_pos: self.fixed as u32 }); } output } } impl GliumStandardPrimitive for Polygon { type Vertex = PolygonVertex; fn get_shaders() -> Shaders { Shaders::VertexGeometryFragment( include_str!("polygon.vs"), include_str!("polygon.ges"), include_str!("polygon.fs") ) } fn get_vertex(self) -> Vec<Self::Vertex> { self.get_vertices() } } #[derive(Copy, Clone, Debug)] pub struct
{ pub corner1: [f32; 2], pub corner2: [f32; 2], pub center: [f32; 2], pub rot: [[f32; 2]; 2], pub pos: [f32; 3], pub color: [f32; 4], pub fixed_pos: u32 } implement_vertex!(PolygonVertex, corner1, corner2, center, rot, pos, color, fixed_pos);
PolygonVertex
identifier_name
collapsible_match.rs
use clippy_utils::diagnostics::span_lint_and_then; use clippy_utils::visitors::LocalUsedVisitor; use clippy_utils::{higher, is_lang_ctor, is_unit_expr, path_to_local, peel_ref_operators, SpanlessEq}; use if_chain::if_chain; use rustc_hir::LangItem::OptionNone; use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, MatchSource, Pat, PatKind, StmtKind}; use rustc_lint::{LateContext, LateLintPass}; use rustc_session::{declare_lint_pass, declare_tool_lint}; use rustc_span::{MultiSpan, Span}; declare_clippy_lint! { /// ### What it does /// Finds nested `match` or `if let` expressions where the patterns may be "collapsed" together /// without adding any branches. /// /// Note that this lint is not intended to find _all_ cases where nested match patterns can be merged, but only /// cases where merging would most likely make the code more readable. /// /// ### Why is this bad? /// It is unnecessarily verbose and complex. /// /// ### Example /// ```rust /// fn func(opt: Option<Result<u64, String>>) { /// let n = match opt { /// Some(n) => match n { /// Ok(n) => n, /// _ => return, /// } /// None => return, /// }; /// } /// ``` /// Use instead: /// ```rust /// fn func(opt: Option<Result<u64, String>>) { /// let n = match opt { /// Some(Ok(n)) => n, /// _ => return, /// }; /// } /// ``` pub COLLAPSIBLE_MATCH, style, "Nested `match` or `if let` expressions where the patterns may be \"collapsed\" together." } declare_lint_pass!(CollapsibleMatch => [COLLAPSIBLE_MATCH]); impl<'tcx> LateLintPass<'tcx> for CollapsibleMatch { fn
(&mut self, cx: &LateContext<'tcx>, expr: &Expr<'tcx>) { match IfLetOrMatch::parse(cx, expr) { Some(IfLetOrMatch::Match(_, arms, _)) => { if let Some(els_arm) = arms.iter().rfind(|arm| arm_is_wild_like(cx, arm)) { for arm in arms { check_arm(cx, true, arm.pat, arm.body, arm.guard.as_ref(), Some(els_arm.body)); } } } Some(IfLetOrMatch::IfLet(_, pat, body, els)) => { check_arm(cx, false, pat, body, None, els); } None => {} } } } fn check_arm<'tcx>( cx: &LateContext<'tcx>, outer_is_match: bool, outer_pat: &'tcx Pat<'tcx>, outer_then_body: &'tcx Expr<'tcx>, outer_guard: Option<&'tcx Guard<'tcx>>, outer_else_body: Option<&'tcx Expr<'tcx>> ) { let inner_expr = strip_singleton_blocks(outer_then_body); if_chain! { if let Some(inner) = IfLetOrMatch::parse(cx, inner_expr); if let Some((inner_scrutinee, inner_then_pat, inner_else_body)) = match inner { IfLetOrMatch::IfLet(scrutinee, pat, _, els) => Some((scrutinee, pat, els)), IfLetOrMatch::Match(scrutinee, arms, ..) => if_chain! { // if there are more than two arms, collapsing would be non-trivial if arms.len() == 2 && arms.iter().all(|a| a.guard.is_none()); // one of the arms must be "wild-like" if let Some(wild_idx) = arms.iter().rposition(|a| arm_is_wild_like(cx, a)); then { let (then, els) = (&arms[1 - wild_idx], &arms[wild_idx]); Some((scrutinee, then.pat, Some(els.body))) } else { None } }, }; if outer_pat.span.ctxt() == inner_scrutinee.span.ctxt(); // match expression must be a local binding // match <local> { .. } if let Some(binding_id) = path_to_local(peel_ref_operators(cx, inner_scrutinee)); if !pat_contains_or(inner_then_pat); // the binding must come from the pattern of the containing match arm // ..<local>.. => match <local> { .. } if let Some(binding_span) = find_pat_binding(outer_pat, binding_id); // the "else" branches must be equal if match (outer_else_body, inner_else_body) { (None, None) => true, (None, Some(e)) | (Some(e), None) => is_unit_expr(e), (Some(a), Some(b)) => SpanlessEq::new(cx).eq_expr(a, b), }; // the binding must not be used in the if guard let mut used_visitor = LocalUsedVisitor::new(cx, binding_id); if outer_guard.map_or(true, |(Guard::If(e) | Guard::IfLet(_, e))| !used_visitor.check_expr(e)); // ...or anywhere in the inner expression if match inner { IfLetOrMatch::IfLet(_, _, body, els) => { !used_visitor.check_expr(body) && els.map_or(true, |e| !used_visitor.check_expr(e)) }, IfLetOrMatch::Match(_, arms, ..)
=> !arms.iter().any(|arm| used_visitor.check_arm(arm)), }; then { let msg = format!( "this `{}` can be collapsed into the outer `{}`", if matches!(inner, IfLetOrMatch::Match(..)) { "match" } else { "if let" }, if outer_is_match { "match" } else { "if let" }, ); span_lint_and_then( cx, COLLAPSIBLE_MATCH, inner_expr.span, &msg, |diag| { let mut help_span = MultiSpan::from_spans(vec![binding_span, inner_then_pat.span]); help_span.push_span_label(binding_span, "replace this binding".into()); help_span.push_span_label(inner_then_pat.span, "with this pattern".into()); diag.span_help(help_span, "the outer pattern can be modified to include the inner pattern"); }, ); } } } fn strip_singleton_blocks<'hir>(mut expr: &'hir Expr<'hir>) -> &'hir Expr<'hir> { while let ExprKind::Block(block, _) = expr.kind { match (block.stmts, block.expr) { ([stmt], None) => match stmt.kind { StmtKind::Expr(e) | StmtKind::Semi(e) => expr = e, _ => break, }, ([], Some(e)) => expr = e, _ => break, } } expr } enum IfLetOrMatch<'hir> { Match(&'hir Expr<'hir>, &'hir [Arm<'hir>], MatchSource), /// scrutinee, pattern, then block, else block IfLet(&'hir Expr<'hir>, &'hir Pat<'hir>, &'hir Expr<'hir>, Option<&'hir Expr<'hir>>), } impl<'hir> IfLetOrMatch<'hir> { fn parse(cx: &LateContext<'_>, expr: &Expr<'hir>) -> Option<Self> { match expr.kind { ExprKind::Match(expr, arms, source) => Some(Self::Match(expr, arms, source)), _ => higher::IfLet::hir(cx, expr).map(|higher::IfLet { let_expr, let_pat, if_then, if_else }| { Self::IfLet(let_expr, let_pat, if_then, if_else) }) } } } /// A "wild-like" arm has a wild (`_`) or `None` pattern and no guard. Such arms can be "collapsed" /// into a single wild arm without any significant loss in semantics or readability. fn arm_is_wild_like(cx: &LateContext<'_>, arm: &Arm<'_>) -> bool { if arm.guard.is_some() { return false; } match arm.pat.kind { PatKind::Binding(..) | PatKind::Wild => true, PatKind::Path(ref qpath) => is_lang_ctor(cx, qpath, OptionNone), _ => false, } } fn find_pat_binding(pat: &Pat<'_>, hir_id: HirId) -> Option<Span> { let mut span = None; pat.walk_short(|p| match &p.kind { // ignore OR patterns PatKind::Or(_) => false, PatKind::Binding(_bm, _, _ident, _) => { let found = p.hir_id == hir_id; if found { span = Some(p.span); } !found }, _ => true, }); span } fn pat_contains_or(pat: &Pat<'_>) -> bool { let mut result = false; pat.walk(|p| { let is_or = matches!(p.kind, PatKind::Or(_)); result |= is_or; !is_or }); result }
check_expr
identifier_name
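The lint's doc comment demonstrates the `match` form; `check_arm` handles the nested `if let` case as well, via the `IfLetOrMatch::IfLet` branch. A small before/after sketch of what gets flagged:

```rust
// Nested `if let` that COLLAPSIBLE_MATCH flags...
fn before(opt: Option<Result<u64, String>>) -> u64 {
    if let Some(n) = opt {
        if let Ok(n) = n {
            return n;
        }
    }
    0
}

// ...and the collapsed form it suggests instead.
fn after(opt: Option<Result<u64, String>>) -> u64 {
    if let Some(Ok(n)) = opt {
        return n;
    }
    0
}
```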
collapsible_match.rs
use clippy_utils::diagnostics::span_lint_and_then; use clippy_utils::visitors::LocalUsedVisitor; use clippy_utils::{higher, is_lang_ctor, is_unit_expr, path_to_local, peel_ref_operators, SpanlessEq}; use if_chain::if_chain; use rustc_hir::LangItem::OptionNone; use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, MatchSource, Pat, PatKind, StmtKind}; use rustc_lint::{LateContext, LateLintPass}; use rustc_session::{declare_lint_pass, declare_tool_lint}; use rustc_span::{MultiSpan, Span}; declare_clippy_lint! { /// ### What it does /// Finds nested `match` or `if let` expressions where the patterns may be "collapsed" together /// without adding any branches. /// /// Note that this lint is not intended to find _all_ cases where nested match patterns can be merged, but only /// cases where merging would most likely make the code more readable. /// /// ### Why is this bad? /// It is unnecessarily verbose and complex. /// /// ### Example /// ```rust /// fn func(opt: Option<Result<u64, String>>) { /// let n = match opt { /// Some(n) => match n { /// Ok(n) => n, /// _ => return, /// } /// None => return, /// }; /// } /// ``` /// Use instead: /// ```rust /// fn func(opt: Option<Result<u64, String>>) { /// let n = match opt { /// Some(Ok(n)) => n, /// _ => return, /// }; /// } /// ``` pub COLLAPSIBLE_MATCH, style, "Nested `match` or `if let` expressions where the patterns may be \"collapsed\" together." } declare_lint_pass!(CollapsibleMatch => [COLLAPSIBLE_MATCH]); impl<'tcx> LateLintPass<'tcx> for CollapsibleMatch { fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &Expr<'tcx>)
} fn check_arm<'tcx>( cx: &LateContext<'tcx>, outer_is_match: bool, outer_pat: &'tcx Pat<'tcx>, outer_then_body: &'tcx Expr<'tcx>, outer_guard: Option<&'tcx Guard<'tcx>>, outer_else_body: Option<&'tcx Expr<'tcx>> ) { let inner_expr = strip_singleton_blocks(outer_then_body); if_chain! { if let Some(inner) = IfLetOrMatch::parse(cx, inner_expr); if let Some((inner_scrutinee, inner_then_pat, inner_else_body)) = match inner { IfLetOrMatch::IfLet(scrutinee, pat, _, els) => Some((scrutinee, pat, els)), IfLetOrMatch::Match(scrutinee, arms, ..) => if_chain! { // if there are more than two arms, collapsing would be non-trivial if arms.len() == 2 && arms.iter().all(|a| a.guard.is_none()); // one of the arms must be "wild-like" if let Some(wild_idx) = arms.iter().rposition(|a| arm_is_wild_like(cx, a)); then { let (then, els) = (&arms[1 - wild_idx], &arms[wild_idx]); Some((scrutinee, then.pat, Some(els.body))) } else { None } }, }; if outer_pat.span.ctxt() == inner_scrutinee.span.ctxt(); // match expression must be a local binding // match <local> { .. } if let Some(binding_id) = path_to_local(peel_ref_operators(cx, inner_scrutinee)); if !pat_contains_or(inner_then_pat); // the binding must come from the pattern of the containing match arm // ..<local>.. => match <local> { .. } if let Some(binding_span) = find_pat_binding(outer_pat, binding_id); // the "else" branches must be equal if match (outer_else_body, inner_else_body) { (None, None) => true, (None, Some(e)) | (Some(e), None) => is_unit_expr(e), (Some(a), Some(b)) => SpanlessEq::new(cx).eq_expr(a, b), }; // the binding must not be used in the if guard let mut used_visitor = LocalUsedVisitor::new(cx, binding_id); if outer_guard.map_or(true, |(Guard::If(e) | Guard::IfLet(_, e))| !used_visitor.check_expr(e)); // ...or anywhere in the inner expression if match inner { IfLetOrMatch::IfLet(_, _, body, els) => { !used_visitor.check_expr(body) && els.map_or(true, |e| !used_visitor.check_expr(e)) }, IfLetOrMatch::Match(_, arms, ..)
=> !arms.iter().any(|arm| used_visitor.check_arm(arm)), }; then { let msg = format!( "this `{}` can be collapsed into the outer `{}`", if matches!(inner, IfLetOrMatch::Match(..)) { "match" } else { "if let" }, if outer_is_match { "match" } else { "if let" }, ); span_lint_and_then( cx, COLLAPSIBLE_MATCH, inner_expr.span, &msg, |diag| { let mut help_span = MultiSpan::from_spans(vec![binding_span, inner_then_pat.span]); help_span.push_span_label(binding_span, "replace this binding".into()); help_span.push_span_label(inner_then_pat.span, "with this pattern".into()); diag.span_help(help_span, "the outer pattern can be modified to include the inner pattern"); }, ); } } } fn strip_singleton_blocks<'hir>(mut expr: &'hir Expr<'hir>) -> &'hir Expr<'hir> { while let ExprKind::Block(block, _) = expr.kind { match (block.stmts, block.expr) { ([stmt], None) => match stmt.kind { StmtKind::Expr(e) | StmtKind::Semi(e) => expr = e, _ => break, }, ([], Some(e)) => expr = e, _ => break, } } expr } enum IfLetOrMatch<'hir> { Match(&'hir Expr<'hir>, &'hir [Arm<'hir>], MatchSource), /// scrutinee, pattern, then block, else block IfLet(&'hir Expr<'hir>, &'hir Pat<'hir>, &'hir Expr<'hir>, Option<&'hir Expr<'hir>>), } impl<'hir> IfLetOrMatch<'hir> { fn parse(cx: &LateContext<'_>, expr: &Expr<'hir>) -> Option<Self> { match expr.kind { ExprKind::Match(expr, arms, source) => Some(Self::Match(expr, arms, source)), _ => higher::IfLet::hir(cx, expr).map(|higher::IfLet { let_expr, let_pat, if_then, if_else }| { Self::IfLet(let_expr, let_pat, if_then, if_else) }) } } } /// A "wild-like" arm has a wild (`_`) or `None` pattern and no guard. Such arms can be "collapsed" /// into a single wild arm without any significant loss in semantics or readability. fn arm_is_wild_like(cx: &LateContext<'_>, arm: &Arm<'_>) -> bool { if arm.guard.is_some() { return false; } match arm.pat.kind { PatKind::Binding(..) | PatKind::Wild => true, PatKind::Path(ref qpath) => is_lang_ctor(cx, qpath, OptionNone), _ => false, } } fn find_pat_binding(pat: &Pat<'_>, hir_id: HirId) -> Option<Span> { let mut span = None; pat.walk_short(|p| match &p.kind { // ignore OR patterns PatKind::Or(_) => false, PatKind::Binding(_bm, _, _ident, _) => { let found = p.hir_id == hir_id; if found { span = Some(p.span); } !found }, _ => true, }); span } fn pat_contains_or(pat: &Pat<'_>) -> bool { let mut result = false; pat.walk(|p| { let is_or = matches!(p.kind, PatKind::Or(_)); result |= is_or; !is_or }); result }
{ match IfLetOrMatch::parse(cx, expr) { Some(IfLetOrMatch::Match(_, arms, _)) => { if let Some(els_arm) = arms.iter().rfind(|arm| arm_is_wild_like(cx, arm)) { for arm in arms { check_arm(cx, true, arm.pat, arm.body, arm.guard.as_ref(), Some(els_arm.body)); } } } Some(IfLetOrMatch::IfLet(_, pat, body, els)) => { check_arm(cx, false, pat, body, None, els); } None => {} } }
identifier_body
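Before inspecting the inner expression, `strip_singleton_blocks` peels away blocks that hold exactly one statement or only a tail expression, so `Some(n) => { match n { .. } }` is treated the same as `Some(n) => match n { .. }`. A toy model of that loop over a simplified expression type (the real code walks rustc's HIR, not this enum):

```rust
/// Simplified stand-in for the HIR expression tree.
enum Expr {
    Block(Vec<Expr>),
    Leaf(i32),
}

/// Unwrap single-child blocks until a non-block or a multi-statement
/// block is reached — mirroring `strip_singleton_blocks`.
fn strip(mut e: &Expr) -> &Expr {
    while let Expr::Block(inner) = e {
        match inner.as_slice() {
            [only] => e = only,
            _ => break,
        }
    }
    e
}

fn main() {
    let nested = Expr::Block(vec![Expr::Block(vec![Expr::Leaf(7)])]);
    assert!(matches!(strip(&nested), Expr::Leaf(7)));
}
```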
collapsible_match.rs
use clippy_utils::diagnostics::span_lint_and_then; use clippy_utils::visitors::LocalUsedVisitor; use clippy_utils::{higher, is_lang_ctor, is_unit_expr, path_to_local, peel_ref_operators, SpanlessEq}; use if_chain::if_chain; use rustc_hir::LangItem::OptionNone; use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, MatchSource, Pat, PatKind, StmtKind}; use rustc_lint::{LateContext, LateLintPass}; use rustc_session::{declare_lint_pass, declare_tool_lint}; use rustc_span::{MultiSpan, Span}; declare_clippy_lint! { /// ### What it does /// Finds nested `match` or `if let` expressions where the patterns may be "collapsed" together /// without adding any branches. /// /// Note that this lint is not intended to find _all_ cases where nested match patterns can be merged, but only /// cases where merging would most likely make the code more readable. /// /// ### Why is this bad? /// It is unnecessarily verbose and complex. /// /// ### Example /// ```rust
/// Ok(n) => n, /// _ => return, /// } /// None => return, /// }; /// } /// ``` /// Use instead: /// ```rust /// fn func(opt: Option<Result<u64, String>>) { /// let n = match opt { /// Some(Ok(n)) => n, /// _ => return, /// }; /// } /// ``` pub COLLAPSIBLE_MATCH, style, "Nested `match` or `if let` expressions where the patterns may be \"collapsed\" together." } declare_lint_pass!(CollapsibleMatch => [COLLAPSIBLE_MATCH]); impl<'tcx> LateLintPass<'tcx> for CollapsibleMatch { fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &Expr<'tcx>) { match IfLetOrMatch::parse(cx, expr) { Some(IfLetOrMatch::Match(_, arms, _)) => { if let Some(els_arm) = arms.iter().rfind(|arm| arm_is_wild_like(cx, arm)) { for arm in arms { check_arm(cx, true, arm.pat, arm.body, arm.guard.as_ref(), Some(els_arm.body)); } } } Some(IfLetOrMatch::IfLet(_, pat, body, els)) => { check_arm(cx, false, pat, body, None, els); } None => {} } } } fn check_arm<'tcx>( cx: &LateContext<'tcx>, outer_is_match: bool, outer_pat: &'tcx Pat<'tcx>, outer_then_body: &'tcx Expr<'tcx>, outer_guard: Option<&'tcx Guard<'tcx>>, outer_else_body: Option<&'tcx Expr<'tcx>> ) { let inner_expr = strip_singleton_blocks(outer_then_body); if_chain! { if let Some(inner) = IfLetOrMatch::parse(cx, inner_expr); if let Some((inner_scrutinee, inner_then_pat, inner_else_body)) = match inner { IfLetOrMatch::IfLet(scrutinee, pat, _, els) => Some((scrutinee, pat, els)), IfLetOrMatch::Match(scrutinee, arms, ..) => if_chain! { // if there are more than two arms, collapsing would be non-trivial if arms.len() == 2 && arms.iter().all(|a| a.guard.is_none()); // one of the arms must be "wild-like" if let Some(wild_idx) = arms.iter().rposition(|a| arm_is_wild_like(cx, a)); then { let (then, els) = (&arms[1 - wild_idx], &arms[wild_idx]); Some((scrutinee, then.pat, Some(els.body))) } else { None } }, }; if outer_pat.span.ctxt() == inner_scrutinee.span.ctxt(); // match expression must be a local binding // match <local> { .. } if let Some(binding_id) = path_to_local(peel_ref_operators(cx, inner_scrutinee)); if !pat_contains_or(inner_then_pat); // the binding must come from the pattern of the containing match arm // ..<local>.. => match <local> { .. } if let Some(binding_span) = find_pat_binding(outer_pat, binding_id); // the "else" branches must be equal if match (outer_else_body, inner_else_body) { (None, None) => true, (None, Some(e)) | (Some(e), None) => is_unit_expr(e), (Some(a), Some(b)) => SpanlessEq::new(cx).eq_expr(a, b), }; // the binding must not be used in the if guard let mut used_visitor = LocalUsedVisitor::new(cx, binding_id); if outer_guard.map_or(true, |(Guard::If(e) | Guard::IfLet(_, e))| !used_visitor.check_expr(e)); // ...or anywhere in the inner expression if match inner { IfLetOrMatch::IfLet(_, _, body, els) => { !used_visitor.check_expr(body) && els.map_or(true, |e| !used_visitor.check_expr(e)) }, IfLetOrMatch::Match(_, arms, ..)
=> !arms.iter().any(|arm| used_visitor.check_arm(arm)), }; then { let msg = format!( "this `{}` can be collapsed into the outer `{}`", if matches!(inner, IfLetOrMatch::Match(..)) { "match" } else { "if let" }, if outer_is_match { "match" } else { "if let" }, ); span_lint_and_then( cx, COLLAPSIBLE_MATCH, inner_expr.span, &msg, |diag| { let mut help_span = MultiSpan::from_spans(vec![binding_span, inner_then_pat.span]); help_span.push_span_label(binding_span, "replace this binding".into()); help_span.push_span_label(inner_then_pat.span, "with this pattern".into()); diag.span_help(help_span, "the outer pattern can be modified to include the inner pattern"); }, ); } } } fn strip_singleton_blocks<'hir>(mut expr: &'hir Expr<'hir>) -> &'hir Expr<'hir> { while let ExprKind::Block(block, _) = expr.kind { match (block.stmts, block.expr) { ([stmt], None) => match stmt.kind { StmtKind::Expr(e) | StmtKind::Semi(e) => expr = e, _ => break, }, ([], Some(e)) => expr = e, _ => break, } } expr } enum IfLetOrMatch<'hir> { Match(&'hir Expr<'hir>, &'hir [Arm<'hir>], MatchSource), /// scrutinee, pattern, then block, else block IfLet(&'hir Expr<'hir>, &'hir Pat<'hir>, &'hir Expr<'hir>, Option<&'hir Expr<'hir>>), } impl<'hir> IfLetOrMatch<'hir> { fn parse(cx: &LateContext<'_>, expr: &Expr<'hir>) -> Option<Self> { match expr.kind { ExprKind::Match(expr, arms, source) => Some(Self::Match(expr, arms, source)), _ => higher::IfLet::hir(cx, expr).map(|higher::IfLet { let_expr, let_pat, if_then, if_else }| { Self::IfLet(let_expr, let_pat, if_then, if_else) }) } } } /// A "wild-like" arm has a wild (`_`) or `None` pattern and no guard. Such arms can be "collapsed" /// into a single wild arm without any significant loss in semantics or readability. fn arm_is_wild_like(cx: &LateContext<'_>, arm: &Arm<'_>) -> bool { if arm.guard.is_some() { return false; } match arm.pat.kind { PatKind::Binding(..) | PatKind::Wild => true, PatKind::Path(ref qpath) => is_lang_ctor(cx, qpath, OptionNone), _ => false, } } fn find_pat_binding(pat: &Pat<'_>, hir_id: HirId) -> Option<Span> { let mut span = None; pat.walk_short(|p| match &p.kind { // ignore OR patterns PatKind::Or(_) => false, PatKind::Binding(_bm, _, _ident, _) => { let found = p.hir_id == hir_id; if found { span = Some(p.span); } !found }, _ => true, }); span } fn pat_contains_or(pat: &Pat<'_>) -> bool { let mut result = false; pat.walk(|p| { let is_or = matches!(p.kind, PatKind::Or(_)); result |= is_or; !is_or }); result }
/// fn func(opt: Option<Result<u64, String>>) { /// let n = match opt { /// Some(n) => match n {
random_line_split
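One guard in `check_arm` worth noting: the suggestion is suppressed when the inner pattern carries an or-pattern, per `if !pat_contains_or(inner_then_pat)`, since merging `|` alternatives into the outer pattern is not a simple splice. So a nested match that would otherwise collapse is deliberately left alone when the inner pattern uses `|`:

```rust
// Without the `|` this would collapse to `Some(Ok(0)) => 1`; with it,
// pat_contains_or makes the lint bail out.
fn or_pattern(opt: Option<Result<u64, u64>>) -> u64 {
    match opt {
        Some(r) => match r {
            Ok(0) | Ok(1) => 1,
            _ => 0,
        },
        None => 0,
    }
}
```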
fallback.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::fmt; use std::sync::Arc; use anyhow::Error; use futures::{channel::mpsc::channel, SinkExt, StreamExt, TryStreamExt}; use tracing::error; use streams::select_drop; use crate::scmstore::{ BoxedReadStore, BoxedWriteStore, FetchError, FetchStream, KeyStream, ReadStore, }; /// A combinator which queries a preferred store, then falls back to a fallback store /// if a key is not found in the preferred store. Keys fetched from the fallback will /// be written to the write_store if available. pub struct
<K, VP, VF, VW> { /// The preferred store, which will always be queried. Usually a local store. pub preferred: BoxedReadStore<K, VP>, /// The fallback store, which will be queried if the value is not found in the /// primary store. pub fallback: BoxedReadStore<K, VF>, /// A `WriteStore` to which values read from the fallback store are written. Generally /// this will be the same as the preferred store. pub write_store: Option<BoxedWriteStore<K, VW>>, } const CHANNEL_BUFFER: usize = 200; impl<K, VP, VF, VW, VO> ReadStore<K, VO> for FallbackCache<K, VP, VF, VW> where K: fmt::Display + fmt::Debug + Send + Sync + Clone + Unpin + 'static, // Preferred Value Type VP: Send + Sync + Clone + 'static, // Fallback Value Type VF: Send + Sync + Clone + 'static, // Write Value Type (must support conversion from fallback) VW: Send + Sync + Clone + From<VF> + 'static, // Output Value Type (must support conversion from preferred & fallback) VO: Send + Sync + Clone + TryFrom<VF> + TryFrom<VP> + 'static, // TODO(meyer): For now, we just require the conversion errors to be convertible to anyhow::Error // We can probably loosen this later. In particular, we want to associate the key, at least. <VO as TryFrom<VF>>::Error: Into<Error>, <VO as TryFrom<VP>>::Error: Into<Error>, { fn fetch_stream(self: Arc<Self>, keys: KeyStream<K>) -> FetchStream<K, VO> { // TODO(meyer): Write a custom Stream implementation to try to avoid use of channels let (sender, receiver) = channel(CHANNEL_BUFFER); let preferred_stream = self .preferred .clone() .fetch_stream(keys) .filter_map(move |res| { let mut sender = sender.clone(); async move { use FetchError::*; match res { // Convert preferred values into output values Ok(v) => Some(v.try_into().map_err(FetchError::from)), // TODO(meyer): Looks like we aren't up to date with futures crate, missing "feed" method, which is probably better here. // I think this might serialize the fallback stream as-written. Err(NotFound(k)) => match sender.send(k.clone()).await { Ok(()) => None, Err(e) => Some(Err(FetchError::with_key(k, e))), }, // TODO(meyer): Should we also fall back on KeyedError, but also log an error? Err(e) => Some(Err(e)), } } }); let fallback_stream = self.fallback.clone().fetch_stream(Box::pin(receiver)); if let Some(ref write_store) = self.write_store { let (write_sender, write_receiver) = channel(CHANNEL_BUFFER); let fallback_stream = fallback_stream.and_then(move |v: VF| { let mut write_sender = write_sender.clone(); async move { // Convert fallback values to write values if let Err(e) = write_sender.send(v.clone().into()).await { // TODO(meyer): Eventually add better tracing support to these traits. Each combinator should have a span, etc. // TODO(meyer): Update tracing? Looks like we don't have access to the most recent version of the macro syntax. error!({ error = %e }, "error writing fallback value to channel"); } // Convert fallback values to output values v.try_into().map_err(FetchError::from) } }); // TODO(meyer): This whole "fake filter map" approach to driving the write stream forward seems bad. let write_results_null = write_store .clone() .write_stream(Box::pin(write_receiver)) // TODO(meyer): Don't swallow all write errors here.
.filter_map(|_res| futures::future::ready(None)); // TODO(meyer): Implement `select_all_drop` if we continue with this approach Box::pin(select_drop( preferred_stream, select_drop(fallback_stream, write_results_null), )) } else { // Convert fallback values to output values Box::pin(select_drop( preferred_stream, fallback_stream.map(|r| r.and_then(|v| v.try_into().map_err(FetchError::from))), )) } } } /// A combinator which queries a preferred store, then falls back to a fallback store /// if a key is not found in the preferred store. Unlike `FallbackCache`, this type /// does not support writing. It is provided for cases where writing is not desired, and /// requiring a conversion to the write value type is nonsensical. pub struct Fallback<K, VP, VF> { /// The preferred store, which will always be queried. Usually a local store. pub preferred: BoxedReadStore<K, VP>, /// The fallback store, which will be queried if the value is not found in the /// primary store. pub fallback: BoxedReadStore<K, VF>, } impl<K, VP, VF, VO> ReadStore<K, VO> for Fallback<K, VP, VF> where K: fmt::Display + fmt::Debug + Send + Sync + Clone + Unpin + 'static, // Preferred Value Type VP: Send + Sync + Clone + 'static, // Fallback Value Type VF: Send + Sync + Clone + 'static, // Output Value Type (must support conversion from preferred & fallback) VO: Send + Sync + Clone + From<VF> + From<VP> + 'static, { fn fetch_stream(self: Arc<Self>, keys: KeyStream<K>) -> FetchStream<K, VO> { // TODO(meyer): Write a custom Stream implementation to try to avoid use of channels let (sender, receiver) = channel(CHANNEL_BUFFER); let preferred_stream = self .preferred .clone() .fetch_stream(keys) .filter_map(move |res| { let mut sender = sender.clone(); async move { use FetchError::*; match res { // Convert preferred values into output values Ok(v) => Some(Ok(v.into())), // TODO(meyer): Looks like we aren't up to date with futures crate, missing "feed" method, which is probably better here. // I think this might serialize the fallback stream as-written. Err(NotFound(k)) => match sender.send(k.clone()).await { Ok(()) => None, Err(e) => Some(Err(FetchError::with_key(k, e))), }, // TODO(meyer): Should we also fall back on KeyedError, but also log an error? Err(e) => Some(Err(e)), } } }); let fallback_stream = self .fallback .clone() .fetch_stream(Box::pin(receiver)) .map_ok(|v| v.into()); Box::pin(select_drop(preferred_stream, fallback_stream)) } }
FallbackCache
identifier_name
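`FallbackCache` implements this flow over async streams; with the stream and channel machinery stripped away, the control flow reduces to a classic read-through cache. A synchronous stand-in using `HashMap`s (the shape of the logic only, not the crate's API):

```rust
use std::collections::HashMap;

/// Look each key up in `preferred`; on a miss, consult `fallback` and
/// write the recovered value back into `preferred` — the "cache" step
/// that FallbackCache's optional write_store performs.
fn fetch_with_fallback(
    preferred: &mut HashMap<String, String>,
    fallback: &HashMap<String, String>,
    keys: &[String],
) -> Vec<(String, Option<String>)> {
    keys.iter()
        .map(|k| {
            if let Some(v) = preferred.get(k) {
                return (k.clone(), Some(v.clone()));
            }
            match fallback.get(k) {
                Some(v) => {
                    preferred.insert(k.clone(), v.clone()); // write-back
                    (k.clone(), Some(v.clone()))
                }
                None => (k.clone(), None),
            }
        })
        .collect()
}
```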
fallback.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::fmt; use std::sync::Arc; use anyhow::Error; use futures::{channel::mpsc::channel, SinkExt, StreamExt, TryStreamExt};
use crate::scmstore::{ BoxedReadStore, BoxedWriteStore, FetchError, FetchStream, KeyStream, ReadStore, }; /// A combinator which queries a preferred store, then falls back to a fallback store /// if a key is not found in the preferred store. Keys fetched from the fallback will /// be written to the write_store if available. pub struct FallbackCache<K, VP, VF, VW> { /// The preferred store, which will always be queried. Usually a local store. pub preferred: BoxedReadStore<K, VP>, /// The fallback store, which will be queried if the value is not found in the /// primary store. pub fallback: BoxedReadStore<K, VF>, /// A `WriteStore` to which values read from the fallback store are written. Generally /// this will be the same as the preferred store. pub write_store: Option<BoxedWriteStore<K, VW>>, } const CHANNEL_BUFFER: usize = 200; impl<K, VP, VF, VW, VO> ReadStore<K, VO> for FallbackCache<K, VP, VF, VW> where K: fmt::Display + fmt::Debug + Send + Sync + Clone + Unpin + 'static, // Preferred Value Type VP: Send + Sync + Clone + 'static, // Fallback Value Type VF: Send + Sync + Clone + 'static, // Write Value Type (must support conversion from fallback) VW: Send + Sync + Clone + From<VF> + 'static, // Output Value Type (must support conversion from preferred & fallback) VO: Send + Sync + Clone + TryFrom<VF> + TryFrom<VP> + 'static, // TODO(meyer): For now, we just require the conversion errors to be convertible to anyhow::Error // We can probably loosen this later. In particular, we want to associate the key, at least. <VO as TryFrom<VF>>::Error: Into<Error>, <VO as TryFrom<VP>>::Error: Into<Error>, { fn fetch_stream(self: Arc<Self>, keys: KeyStream<K>) -> FetchStream<K, VO> { // TODO(meyer): Write a custom Stream implementation to try to avoid use of channels let (sender, receiver) = channel(CHANNEL_BUFFER); let preferred_stream = self .preferred .clone() .fetch_stream(keys) .filter_map(move |res| { let mut sender = sender.clone(); async move { use FetchError::*; match res { // Convert preferred values into output values Ok(v) => Some(v.try_into().map_err(FetchError::from)), // TODO(meyer): Looks like we aren't up to date with futures crate, missing "feed" method, which is probably better here. // I think this might serialize the fallback stream as-written. Err(NotFound(k)) => match sender.send(k.clone()).await { Ok(()) => None, Err(e) => Some(Err(FetchError::with_key(k, e))), }, // TODO(meyer): Should we also fall back on KeyedError, but also log an error? Err(e) => Some(Err(e)), } } }); let fallback_stream = self.fallback.clone().fetch_stream(Box::pin(receiver)); if let Some(ref write_store) = self.write_store { let (write_sender, write_receiver) = channel(CHANNEL_BUFFER); let fallback_stream = fallback_stream.and_then(move |v: VF| { let mut write_sender = write_sender.clone(); async move { // Convert fallback values to write values if let Err(e) = write_sender.send(v.clone().into()).await { // TODO(meyer): Eventually add better tracing support to these traits. Each combinator should have a span, etc. // TODO(meyer): Update tracing? Looks like we don't have access to the most recent version of the macro syntax. error!({ error = %e }, "error writing fallback value to channel"); } // Convert fallback values to output values v.try_into().map_err(FetchError::from) } }); // TODO(meyer): This whole "fake filter map" approach to driving the write stream forward seems bad.
let write_results_null = write_store .clone() .write_stream(Box::pin(write_receiver)) // TODO(meyer): Don't swallow all write errors here. .filter_map(|_res| futures::future::ready(None)); // TODO(meyer): Implement `select_all_drop` if we continue with this approach Box::pin(select_drop( preferred_stream, select_drop(fallback_stream, write_results_null), )) } else { // Convert fallback values to output values Box::pin(select_drop( preferred_stream, fallback_stream.map(|r| r.and_then(|v| v.try_into().map_err(FetchError::from))), )) } } } /// A combinator which queries a preferred store, then falls back to a fallback store /// if a key is not found in the preferred store. Unlike `FallbackCache`, this type /// does not support writing. It is provided for cases where writing is not desired, and /// requiring a conversion to the write value type is nonsensical. pub struct Fallback<K, VP, VF> { /// The preferred store, which will always be queried. Usually a local store. pub preferred: BoxedReadStore<K, VP>, /// The fallback store, which will be queried if the value is not found in the /// primary store. pub fallback: BoxedReadStore<K, VF>, } impl<K, VP, VF, VO> ReadStore<K, VO> for Fallback<K, VP, VF> where K: fmt::Display + fmt::Debug + Send + Sync + Clone + Unpin + 'static, // Preferred Value Type VP: Send + Sync + Clone + 'static, // Fallback Value Type VF: Send + Sync + Clone + 'static, // Output Value Type (must support conversion from preferred & fallback) VO: Send + Sync + Clone + From<VF> + From<VP> + 'static, { fn fetch_stream(self: Arc<Self>, keys: KeyStream<K>) -> FetchStream<K, VO> { // TODO(meyer): Write a custom Stream implementation to try to avoid use of channels let (sender, receiver) = channel(CHANNEL_BUFFER); let preferred_stream = self .preferred .clone() .fetch_stream(keys) .filter_map(move |res| { let mut sender = sender.clone(); async move { use FetchError::*; match res { // Convert preferred values into output values Ok(v) => Some(Ok(v.into())), // TODO(meyer): Looks like we aren't up to date with futures crate, missing "feed" method, which is probably better here. // I think this might serialize the fallback stream as-written. Err(NotFound(k)) => match sender.send(k.clone()).await { Ok(()) => None, Err(e) => Some(Err(FetchError::with_key(k, e))), }, // TODO(meyer): Should we also fall back on KeyedError, but also log an error? Err(e) => Some(Err(e)), } } }); let fallback_stream = self .fallback .clone() .fetch_stream(Box::pin(receiver)) .map_ok(|v| v.into()); Box::pin(select_drop(preferred_stream, fallback_stream)) } }
use tracing::error; use streams::select_drop;
random_line_split
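Misses from the preferred stream are forwarded to the fallback through a bounded `futures::channel::mpsc` channel (`CHANNEL_BUFFER` above). A stand-alone sketch of that plumbing with plain integers, including the detail that the receiving stream only terminates once every sender is dropped:

```rust
use futures::{channel::mpsc::channel, executor::block_on, SinkExt, StreamExt};

fn main() {
    // Bounded channel, as in the combinator (CHANNEL_BUFFER there is 200).
    let (mut tx, rx) = channel::<u32>(8);
    block_on(async {
        for missed_key in [1u32, 2, 3] {
            tx.send(missed_key).await.expect("receiver still alive");
        }
        drop(tx); // close the channel so the receiving stream can end
        let forwarded: Vec<u32> = rx.collect().await;
        assert_eq!(forwarded, vec![1, 2, 3]);
    });
}
```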
fallback.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::fmt; use std::sync::Arc; use anyhow::Error; use futures::{channel::mpsc::channel, SinkExt, StreamExt, TryStreamExt}; use tracing::error; use streams::select_drop; use crate::scmstore::{ BoxedReadStore, BoxedWriteStore, FetchError, FetchStream, KeyStream, ReadStore, }; /// A combinator which queries a preferred store, then falls back to a fallback store /// if a key is not found in the preferred store. Keys fetched from the fallback will /// be written to the write_store if available. pub struct FallbackCache<K, VP, VF, VW> { /// The preferred store, which will always be queried. Usually a local store. pub preferred: BoxedReadStore<K, VP>, /// The fallback store, which will be queried if the value is not found in the /// primary store. pub fallback: BoxedReadStore<K, VF>, /// A `WriteStore` to which values read from the fallback store are written. Generally /// this will be the same as the preferred store. pub write_store: Option<BoxedWriteStore<K, VW>>, } const CHANNEL_BUFFER: usize = 200; impl<K, VP, VF, VW, VO> ReadStore<K, VO> for FallbackCache<K, VP, VF, VW> where K: fmt::Display + fmt::Debug + Send + Sync + Clone + Unpin + 'static, // Preferred Value Type VP: Send + Sync + Clone + 'static, // Fallback Value Type VF: Send + Sync + Clone + 'static, // Write Value Type (must support conversion from fallback) VW: Send + Sync + Clone + From<VF> + 'static, // Output Value Type (must support conversion from preferred & fallback) VO: Send + Sync + Clone + TryFrom<VF> + TryFrom<VP> + 'static, // TODO(meyer): For now, we just require the conversion errors to be convertible to anyhow::Error // We can probably loosen this later. In particular, we want to associate the key, at least. <VO as TryFrom<VF>>::Error: Into<Error>, <VO as TryFrom<VP>>::Error: Into<Error>, { fn fetch_stream(self: Arc<Self>, keys: KeyStream<K>) -> FetchStream<K, VO> { // TODO(meyer): Write a custom Stream implementation to try to avoid use of channels let (sender, receiver) = channel(CHANNEL_BUFFER); let preferred_stream = self .preferred .clone() .fetch_stream(keys) .filter_map(move |res| { let mut sender = sender.clone(); async move { use FetchError::*; match res { // Convert preferred values into output values Ok(v) => Some(v.try_into().map_err(FetchError::from)), // TODO(meyer): Looks like we aren't up to date with futures crate, missing "feed" method, which is probably better here. // I think this might serialize the fallback stream as-written. Err(NotFound(k)) => match sender.send(k.clone()).await { Ok(()) => None, Err(e) => Some(Err(FetchError::with_key(k, e))), }, // TODO(meyer): Should we also fall back on KeyedError, but also log an error? Err(e) => Some(Err(e)), } } }); let fallback_stream = self.fallback.clone().fetch_stream(Box::pin(receiver)); if let Some(ref write_store) = self.write_store { let (write_sender, write_receiver) = channel(CHANNEL_BUFFER); let fallback_stream = fallback_stream.and_then(move |v: VF| { let mut write_sender = write_sender.clone(); async move { // Convert fallback values to write values if let Err(e) = write_sender.send(v.clone().into()).await { // TODO(meyer): Eventually add better tracing support to these traits. Each combinator should have a span, etc. // TODO(meyer): Update tracing? Looks like we don't have access to the most recent version of the macro syntax.
error!({ error = %e }, "error writing fallback value to channel"); } // Convert fallback values to output values v.try_into().map_err(FetchError::from) } }); // TODO(meyer): This whole "fake filter map" approach to driving the write stream forward seems bad. let write_results_null = write_store .clone() .write_stream(Box::pin(write_receiver)) // TODO(meyer): Don't swallow all write errors here. .filter_map(|_res| futures::future::ready(None)); // TODO(meyer): Implement `select_all_drop` if we continue with this approach Box::pin(select_drop( preferred_stream, select_drop(fallback_stream, write_results_null), )) } else { // Convert fallback values to output values Box::pin(select_drop( preferred_stream, fallback_stream.map(|r| r.and_then(|v| v.try_into().map_err(FetchError::from))), )) } } } /// A combinator which queries a preferred store, then falls back to a fallback store /// if a key is not found in the preferred store. Unlike `FallbackCache`, this type /// does not support writing. It is provided for cases where writing is not desired, and /// requiring a conversion to the write value type is nonsensical. pub struct Fallback<K, VP, VF> { /// The preferred store, which will always be queried. Usually a local store. pub preferred: BoxedReadStore<K, VP>, /// The fallback store, which will be queried if the value is not found in the /// primary store. pub fallback: BoxedReadStore<K, VF>, } impl<K, VP, VF, VO> ReadStore<K, VO> for Fallback<K, VP, VF> where K: fmt::Display + fmt::Debug + Send + Sync + Clone + Unpin + 'static, // Preferred Value Type VP: Send + Sync + Clone + 'static, // Fallback Value Type VF: Send + Sync + Clone + 'static, // Output Value Type (must support conversion from preferred & fallback) VO: Send + Sync + Clone + From<VF> + From<VP> + 'static, { fn fetch_stream(self: Arc<Self>, keys: KeyStream<K>) -> FetchStream<K, VO>
}, // TODO(meyer): Should we also fall back on KeyedError, but also log an error? Err(e) => Some(Err(e)), } } }); let fallback_stream = self .fallback .clone() .fetch_stream(Box::pin(receiver)) .map_ok(|v| v.into()); Box::pin(select_drop(preferred_stream, fallback_stream)) } }
{ // TODO(meyer): Write a custom Stream implementation to try to avoid use of channels let (sender, receiver) = channel(CHANNEL_BUFFER); let preferred_stream = self .preferred .clone() .fetch_stream(keys) .filter_map(move |res| { let mut sender = sender.clone(); async move { use FetchError::*; match res { // Convert preferred values into output values Ok(v) => Some(Ok(v.into())), // TODO(meyer): Looks like we aren't up to date with futures crate, missing "feed" method, which is probably better here. // I think this might serialize the fallback stream as-written. Err(NotFound(k)) => match sender.send(k.clone()).await { Ok(()) => None, Err(e) => Some(Err(FetchError::with_key(k, e))),
identifier_body
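The `VO: TryFrom<VP> + TryFrom<VF>` bounds are what allow `v.try_into().map_err(FetchError::from)` to unify preferred and fallback values into one output type. A toy pair of types satisfying such a bound (illustrative only, unrelated to the crate's actual value types):

```rust
use std::convert::{TryFrom, TryInto};

struct RawBytes(Vec<u8>);
struct Text(String);

// Fallible conversion: the bytes may not be valid UTF-8.
impl TryFrom<RawBytes> for Text {
    type Error = std::string::FromUtf8Error;
    fn try_from(raw: RawBytes) -> Result<Self, Self::Error> {
        String::from_utf8(raw.0).map(Text)
    }
}

fn main() {
    let ok: Result<Text, _> = RawBytes(b"hello".to_vec()).try_into();
    assert!(ok.is_ok());
    let bad: Result<Text, _> = RawBytes(vec![0xff, 0xfe]).try_into();
    assert!(bad.is_err());
}
```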
framerate.rs
//! Framerate control use libc; use libc::{c_void, size_t}; use std::mem; use ::get_error; use sys::gfx; /// Structure holding the state and timing information of the framerate controller. pub struct FPSManager { raw: *mut gfx::framerate::FPSmanager, } impl FPSManager { /// Create the framerate manager. pub fn new() -> FPSManager { unsafe { let size = mem::size_of::<gfx::framerate::FPSmanager>() as size_t; let raw = libc::malloc(size) as *mut gfx::framerate::FPSmanager; gfx::framerate::SDL_initFramerate(raw); FPSManager { raw: raw } } } /// Set the framerate in Hz. pub fn set_framerate(&mut self, rate: u32) -> Result<(), String> { let ret = unsafe { gfx::framerate::SDL_setFramerate(self.raw, rate as u32) }; match ret { 0 => Ok(()), _ => Err(get_error()) } } /// Return the current target framerate in Hz. pub fn
(&self) -> i32 { // will not get an error unsafe { gfx::framerate::SDL_getFramerate(self.raw) as i32 } } /// Return the current framecount. pub fn get_frame_count(&self) -> i32 { // will not get an error unsafe { gfx::framerate::SDL_getFramecount(self.raw) as i32 } } /// Delay execution to maintain a constant framerate and calculate fps. pub fn delay(&mut self) -> u32 { unsafe { gfx::framerate::SDL_framerateDelay(self.raw) as u32 } } } impl Drop for FPSManager { fn drop(&mut self) { unsafe { libc::free(self.raw as *mut c_void) } } }
get_framerate
identifier_name
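A hypothetical game loop around `FPSManager` (the update/render steps are placeholders; this assumes the sdl2 gfx bindings the module wraps are available):

```rust
// Sketch only: FPSManager comes from the module above.
fn run(mut fps: FPSManager) {
    fps.set_framerate(60).expect("60 Hz is a valid rate");
    for _frame in 0..3 {
        // ... update game state and render here ...
        // delay() sleeps just long enough to hold the target rate
        // and reports the time spent in this frame, in ms.
        let _frame_ms = fps.delay();
    }
    println!("frames so far: {}", fps.get_frame_count());
}
```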
framerate.rs
//! Framerate control use libc; use libc::{c_void, size_t}; use std::mem; use ::get_error; use sys::gfx; /// Structure holding the state and timing information of the framerate controller. pub struct FPSManager { raw: *mut gfx::framerate::FPSmanager, } impl FPSManager { /// Create the framerate manager. pub fn new() -> FPSManager { unsafe { let size = mem::size_of::<gfx::framerate::FPSmanager>() as size_t; let raw = libc::malloc(size) as *mut gfx::framerate::FPSmanager; gfx::framerate::SDL_initFramerate(raw); FPSManager { raw: raw } } } /// Set the framerate in Hz. pub fn set_framerate(&mut self, rate: u32) -> Result<(), String> { let ret = unsafe { gfx::framerate::SDL_setFramerate(self.raw, rate as u32) }; match ret { 0 => Ok(()), _ => Err(get_error()) } } /// Return the current target framerate in Hz. pub fn get_framerate(&self) -> i32 { // will not get an error unsafe { gfx::framerate::SDL_getFramerate(self.raw) as i32 } } /// Return the current framecount. pub fn get_frame_count(&self) -> i32
/// Delay execution to maintain a constant framerate and calculate fps. pub fn delay(&mut self) -> u32 { unsafe { gfx::framerate::SDL_framerateDelay(self.raw) as u32 } } } impl Drop for FPSManager { fn drop(&mut self) { unsafe { libc::free(self.raw as *mut c_void) } } }
{ // will not get an error unsafe { gfx::framerate::SDL_getFramecount(self.raw) as i32 } }
identifier_body
framerate.rs
//! Framerate control use libc; use libc::{c_void, size_t}; use std::mem; use ::get_error; use sys::gfx; /// Structure holding the state and timing information of the framerate controller. pub struct FPSManager { raw: *mut gfx::framerate::FPSmanager,
unsafe { let size = mem::size_of::<gfx::framerate::FPSmanager>() as size_t; let raw = libc::malloc(size) as *mut gfx::framerate::FPSmanager; gfx::framerate::SDL_initFramerate(raw); FPSManager { raw: raw } } } /// Set the framerate in Hz. pub fn set_framerate(&mut self, rate: u32) -> Result<(), String> { let ret = unsafe { gfx::framerate::SDL_setFramerate(self.raw, rate as u32) }; match ret { 0 => Ok(()), _ => Err(get_error()) } } /// Return the current target framerate in Hz. pub fn get_framerate(&self) -> i32 { // will not get an error unsafe { gfx::framerate::SDL_getFramerate(self.raw) as i32 } } /// Return the current framecount. pub fn get_frame_count(&self) -> i32 { // will not get an error unsafe { gfx::framerate::SDL_getFramecount(self.raw) as i32 } } /// Delay execution to maintain a constant framerate and calculate fps. pub fn delay(&mut self) -> u32 { unsafe { gfx::framerate::SDL_framerateDelay(self.raw) as u32 } } } impl Drop for FPSManager { fn drop(&mut self) { unsafe { libc::free(self.raw as *mut c_void) } } }
} impl FPSManager { /// Create the framerate manager. pub fn new() -> FPSManager {
random_line_split
ethkey.rs
// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. extern crate docopt; extern crate rustc_serialize; extern crate ethkey; use std::{env, fmt, process}; use std::num::ParseIntError; use docopt::Docopt; use rustc_serialize::hex::{FromHex, FromHexError}; use ethkey::{KeyPair, Random, Brain, Prefix, Error as EthkeyError, Generator, sign, verify_public, verify_address}; pub const USAGE: &'static str = r#" Ethereum keys generator. Copyright 2016 Ethcore (UK) Limited Usage: ethkey info <secret> [options] ethkey generate random [options] ethkey generate prefix <prefix> <iterations> [options] ethkey generate brain <seed> [options] ethkey sign <secret> <message> ethkey verify public <public> <signature> <message> ethkey verify address <address> <signature> <message> ethkey [-h | --help] Options: -h, --help Display this message and exit. -s, --secret Display only the secret. -p, --public Display only the public. -a, --address Display only the address. Commands: info Display public and address of the secret. generate Generates new ethereum key. random Random generation. prefix Random generation, but address must start with a prefix brain Generate new key from string seed. sign Sign message using secret. verify Verify signer of the signature. 
"#; #[derive(Debug, RustcDecodable)] struct Args { cmd_info: bool, cmd_generate: bool, cmd_random: bool, cmd_prefix: bool, cmd_brain: bool, cmd_sign: bool, cmd_verify: bool, cmd_public: bool, cmd_address: bool, arg_prefix: String, arg_iterations: String, arg_seed: String, arg_secret: String, arg_message: String, arg_public: String, arg_address: String, arg_signature: String, flag_secret: bool, flag_public: bool, flag_address: bool, } #[derive(Debug)] enum Error { Ethkey(EthkeyError), FromHex(FromHexError), ParseInt(ParseIntError), } impl From<EthkeyError> for Error { fn from(err: EthkeyError) -> Self { Error::Ethkey(err) } } impl From<FromHexError> for Error { fn from(err: FromHexError) -> Self { Error::FromHex(err) } } impl From<ParseIntError> for Error { fn from(err: ParseIntError) -> Self { Error::ParseInt(err) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { Error::Ethkey(ref e) => write!(f, "{}", e), Error::FromHex(ref e) => write!(f, "{}", e), Error::ParseInt(ref e) => write!(f, "{}", e), } } } enum DisplayMode { KeyPair, Secret, Public, Address, } impl DisplayMode { fn new(args: &Args) -> Self { if args.flag_secret { DisplayMode::Secret } else if args.flag_public { DisplayMode::Public } else if args.flag_address { DisplayMode::Address } else { DisplayMode::KeyPair } } } fn main() { match execute(env::args()) { Ok(ok) => println!("{}", ok), Err(err) => { println!("{}", err); process::exit(1); }, } } fn display(keypair: KeyPair, mode: DisplayMode) -> String { match mode { DisplayMode::KeyPair => format!("{}", keypair), DisplayMode::Secret => format!("{:?}", keypair.secret()), DisplayMode::Public => format!("{:?}", keypair.public()), DisplayMode::Address => format!("{:?}", keypair.address()), } } fn execute<S, I>(command: I) -> Result<String, Error> where I: IntoIterator<Item=S>, S: AsRef<str> { let args: Args = Docopt::new(USAGE) .and_then(|d| d.argv(command).decode()) .unwrap_or_else(|e| e.exit()); return if args.cmd_info { let display_mode = DisplayMode::new(&args); let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?; let keypair = KeyPair::from_secret(secret)?; Ok(display(keypair, display_mode)) } else if args.cmd_generate { let display_mode = DisplayMode::new(&args); let keypair = if args.cmd_random { Random.generate() } else if args.cmd_prefix { let prefix = args.arg_prefix.from_hex()?; let iterations = usize::from_str_radix(&args.arg_iterations, 10)?; Prefix::new(prefix, iterations).generate() } else if args.cmd_brain { Brain::new(args.arg_seed).generate() } else { unreachable!(); }; Ok(display(keypair?, display_mode)) } else if args.cmd_sign { let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?; let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; let signature = sign(&secret, &message)?; Ok(format!("{}", signature)) } else if args.cmd_verify { let signature = args.arg_signature.parse().map_err(|_| EthkeyError::InvalidSignature)?; let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; let ok = if args.cmd_public { let public = args.arg_public.parse().map_err(|_| EthkeyError::InvalidPublic)?; verify_public(&public, &signature, &message)? } else if args.cmd_address { let address = args.arg_address.parse().map_err(|_| EthkeyError::InvalidAddress)?; verify_address(&address, &signature, &message)? 
} else { unreachable!(); }; Ok(format!("{}", ok)) } else { unreachable!(); } } #[cfg(test)] mod tests { use super::execute; #[test] fn info() { let command = vec!["ethkey", "info", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55 public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124 address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn brain() { let command = vec!["ethkey", "generate", "brain", "this is sparta"] .into_iter() .map(Into::into) .collect::<Vec<String>>();
let expected = "secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55 public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124 address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn secret() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--secret"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn public() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--public"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn address() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--address"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn sign() { let command = vec!["ethkey", "sign", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_valid_public() { let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "true".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_valid_address() { let command = vec!["ethkey", "verify", "address", "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "true".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_invalid() { let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec986"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "false".to_owned(); assert_eq!(execute(command).unwrap(), expected); } }
random_line_split
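The unit tests above drive the CLI entirely through `execute`; the same call works programmatically from inside this module, e.g. with a hypothetical helper alongside the tests:

```rust
// Run `ethkey info <secret>` in-process and get the formatted output back.
fn info_in_process() -> Result<String, Error> {
    let argv = [
        "ethkey",
        "info",
        "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55",
    ];
    execute(argv.iter().copied())
}
```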
ethkey.rs
// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. extern crate docopt; extern crate rustc_serialize; extern crate ethkey; use std::{env, fmt, process}; use std::num::ParseIntError; use docopt::Docopt; use rustc_serialize::hex::{FromHex, FromHexError}; use ethkey::{KeyPair, Random, Brain, Prefix, Error as EthkeyError, Generator, sign, verify_public, verify_address}; pub const USAGE: &'static str = r#" Ethereum keys generator. Copyright 2016 Ethcore (UK) Limited Usage: ethkey info <secret> [options] ethkey generate random [options] ethkey generate prefix <prefix> <iterations> [options] ethkey generate brain <seed> [options] ethkey sign <secret> <message> ethkey verify public <public> <signature> <message> ethkey verify address <address> <signature> <message> ethkey [-h | --help] Options: -h, --help Display this message and exit. -s, --secret Display only the secret. -p, --public Display only the public. -a, --address Display only the address. Commands: info Display public and address of the secret. generate Generates new ethereum key. random Random generation. prefix Random generation, but address must start with a prefix brain Generate new key from string seed. sign Sign message using secret. verify Verify signer of the signature. 
"#; #[derive(Debug, RustcDecodable)] struct Args { cmd_info: bool, cmd_generate: bool, cmd_random: bool, cmd_prefix: bool, cmd_brain: bool, cmd_sign: bool, cmd_verify: bool, cmd_public: bool, cmd_address: bool, arg_prefix: String, arg_iterations: String, arg_seed: String, arg_secret: String, arg_message: String, arg_public: String, arg_address: String, arg_signature: String, flag_secret: bool, flag_public: bool, flag_address: bool, } #[derive(Debug)] enum Error { Ethkey(EthkeyError), FromHex(FromHexError), ParseInt(ParseIntError), } impl From<EthkeyError> for Error { fn from(err: EthkeyError) -> Self { Error::Ethkey(err) } } impl From<FromHexError> for Error { fn from(err: FromHexError) -> Self { Error::FromHex(err) } } impl From<ParseIntError> for Error { fn from(err: ParseIntError) -> Self { Error::ParseInt(err) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { Error::Ethkey(ref e) => write!(f, "{}", e), Error::FromHex(ref e) => write!(f, "{}", e), Error::ParseInt(ref e) => write!(f, "{}", e), } } } enum DisplayMode { KeyPair, Secret, Public, Address, } impl DisplayMode { fn new(args: &Args) -> Self { if args.flag_secret { DisplayMode::Secret } else if args.flag_public { DisplayMode::Public } else if args.flag_address { DisplayMode::Address } else { DisplayMode::KeyPair } } } fn main() { match execute(env::args()) { Ok(ok) => println!("{}", ok), Err(err) => { println!("{}", err); process::exit(1); }, } } fn display(keypair: KeyPair, mode: DisplayMode) -> String { match mode { DisplayMode::KeyPair => format!("{}", keypair), DisplayMode::Secret => format!("{:?}", keypair.secret()), DisplayMode::Public => format!("{:?}", keypair.public()), DisplayMode::Address => format!("{:?}", keypair.address()), } } fn execute<S, I>(command: I) -> Result<String, Error> where I: IntoIterator<Item=S>, S: AsRef<str> { let args: Args = Docopt::new(USAGE) .and_then(|d| d.argv(command).decode()) .unwrap_or_else(|e| e.exit()); return if args.cmd_info { let display_mode = DisplayMode::new(&args); let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?; let keypair = KeyPair::from_secret(secret)?; Ok(display(keypair, display_mode)) } else if args.cmd_generate { let display_mode = DisplayMode::new(&args); let keypair = if args.cmd_random { Random.generate() } else if args.cmd_prefix { let prefix = args.arg_prefix.from_hex()?; let iterations = usize::from_str_radix(&args.arg_iterations, 10)?; Prefix::new(prefix, iterations).generate() } else if args.cmd_brain { Brain::new(args.arg_seed).generate() } else { unreachable!(); }; Ok(display(keypair?, display_mode)) } else if args.cmd_sign { let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?; let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; let signature = sign(&secret, &message)?; Ok(format!("{}", signature)) } else if args.cmd_verify { let signature = args.arg_signature.parse().map_err(|_| EthkeyError::InvalidSignature)?; let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; let ok = if args.cmd_public { let public = args.arg_public.parse().map_err(|_| EthkeyError::InvalidPublic)?; verify_public(&public, &signature, &message)? } else if args.cmd_address { let address = args.arg_address.parse().map_err(|_| EthkeyError::InvalidAddress)?; verify_address(&address, &signature, &message)? 
} else { unreachable!(); }; Ok(format!("{}", ok)) } else { unreachable!(); } } #[cfg(test)] mod tests { use super::execute; #[test] fn
() { let command = vec!["ethkey", "info", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55 public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124 address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn brain() { let command = vec!["ethkey", "generate", "brain", "this is sparta"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55 public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124 address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn secret() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--secret"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn public() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--public"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn address() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--address"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn sign() { let command = vec!["ethkey", "sign", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_valid_public() { let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "true".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_valid_address() { let command = vec!["ethkey", "verify", "address", "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "true".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_invalid() { let command = vec!["ethkey", "verify", "public", 
"689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec986"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "false".to_owned(); assert_eq!(execute(command).unwrap(), expected); } }
info
identifier_name
ethkey.rs
// Copyright 2015, 2016 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. extern crate docopt; extern crate rustc_serialize; extern crate ethkey; use std::{env, fmt, process}; use std::num::ParseIntError; use docopt::Docopt; use rustc_serialize::hex::{FromHex, FromHexError}; use ethkey::{KeyPair, Random, Brain, Prefix, Error as EthkeyError, Generator, sign, verify_public, verify_address}; pub const USAGE: &'static str = r#" Ethereum keys generator. Copyright 2016 Ethcore (UK) Limited Usage: ethkey info <secret> [options] ethkey generate random [options] ethkey generate prefix <prefix> <iterations> [options] ethkey generate brain <seed> [options] ethkey sign <secret> <message> ethkey verify public <public> <signature> <message> ethkey verify address <address> <signature> <message> ethkey [-h | --help] Options: -h, --help Display this message and exit. -s, --secret Display only the secret. -p, --public Display only the public. -a, --address Display only the address. Commands: info Display public and address of the secret. generate Generates new ethereum key. random Random generation. prefix Random generation, but address must start with a prefix brain Generate new key from string seed. sign Sign message using secret. verify Verify signer of the signature. "#; #[derive(Debug, RustcDecodable)] struct Args { cmd_info: bool, cmd_generate: bool, cmd_random: bool, cmd_prefix: bool, cmd_brain: bool, cmd_sign: bool, cmd_verify: bool, cmd_public: bool, cmd_address: bool, arg_prefix: String, arg_iterations: String, arg_seed: String, arg_secret: String, arg_message: String, arg_public: String, arg_address: String, arg_signature: String, flag_secret: bool, flag_public: bool, flag_address: bool, } #[derive(Debug)] enum Error { Ethkey(EthkeyError), FromHex(FromHexError), ParseInt(ParseIntError), } impl From<EthkeyError> for Error { fn from(err: EthkeyError) -> Self { Error::Ethkey(err) } } impl From<FromHexError> for Error { fn from(err: FromHexError) -> Self { Error::FromHex(err) } } impl From<ParseIntError> for Error { fn from(err: ParseIntError) -> Self { Error::ParseInt(err) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error>
} enum DisplayMode { KeyPair, Secret, Public, Address, } impl DisplayMode { fn new(args: &Args) -> Self { if args.flag_secret { DisplayMode::Secret } else if args.flag_public { DisplayMode::Public } else if args.flag_address { DisplayMode::Address } else { DisplayMode::KeyPair } } } fn main() { match execute(env::args()) { Ok(ok) => println!("{}", ok), Err(err) => { println!("{}", err); process::exit(1); }, } } fn display(keypair: KeyPair, mode: DisplayMode) -> String { match mode { DisplayMode::KeyPair => format!("{}", keypair), DisplayMode::Secret => format!("{:?}", keypair.secret()), DisplayMode::Public => format!("{:?}", keypair.public()), DisplayMode::Address => format!("{:?}", keypair.address()), } } fn execute<S, I>(command: I) -> Result<String, Error> where I: IntoIterator<Item=S>, S: AsRef<str> { let args: Args = Docopt::new(USAGE) .and_then(|d| d.argv(command).decode()) .unwrap_or_else(|e| e.exit()); return if args.cmd_info { let display_mode = DisplayMode::new(&args); let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?; let keypair = KeyPair::from_secret(secret)?; Ok(display(keypair, display_mode)) } else if args.cmd_generate { let display_mode = DisplayMode::new(&args); let keypair = if args.cmd_random { Random.generate() } else if args.cmd_prefix { let prefix = args.arg_prefix.from_hex()?; let iterations = usize::from_str_radix(&args.arg_iterations, 10)?; Prefix::new(prefix, iterations).generate() } else if args.cmd_brain { Brain::new(args.arg_seed).generate() } else { unreachable!(); }; Ok(display(keypair?, display_mode)) } else if args.cmd_sign { let secret = args.arg_secret.parse().map_err(|_| EthkeyError::InvalidSecret)?; let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; let signature = sign(&secret, &message)?; Ok(format!("{}", signature)) } else if args.cmd_verify { let signature = args.arg_signature.parse().map_err(|_| EthkeyError::InvalidSignature)?; let message = args.arg_message.parse().map_err(|_| EthkeyError::InvalidMessage)?; let ok = if args.cmd_public { let public = args.arg_public.parse().map_err(|_| EthkeyError::InvalidPublic)?; verify_public(&public, &signature, &message)? } else if args.cmd_address { let address = args.arg_address.parse().map_err(|_| EthkeyError::InvalidAddress)?; verify_address(&address, &signature, &message)? 
} else { unreachable!(); }; Ok(format!("{}", ok)) } else { unreachable!(); } } #[cfg(test)] mod tests { use super::execute; #[test] fn info() { let command = vec!["ethkey", "info", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55 public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124 address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn brain() { let command = vec!["ethkey", "generate", "brain", "this is sparta"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "secret: 17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55 public: 689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124 address: 26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn secret() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--secret"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn public() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--public"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn address() { let command = vec!["ethkey", "generate", "brain", "this is sparta", "--address"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn sign() { let command = vec!["ethkey", "sign", "17d08f5fe8c77af811caa0c9a187e668ce3b74a99acc3f6d976f075fa8e0be55", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_valid_public() { let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "true".to_owned(); assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_valid_address() { let command = vec!["ethkey", "verify", "address", "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec987"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "true".to_owned(); 
assert_eq!(execute(command).unwrap(), expected); } #[test] fn verify_invalid() { let command = vec!["ethkey", "verify", "public", "689268c0ff57a20cd299fa60d3fb374862aff565b20b5f1767906a99e6e09f3ff04ca2b2a5cd22f62941db103c0356df1a8ed20ce322cab2483db67685afd124", "c1878cf60417151c766a712653d26ef350c8c75393458b7a9be715f053215af63dfd3b02c2ae65a8677917a8efa3172acb71cb90196e42106953ea0363c5aaf200", "bd50b7370c3f96733b31744c6c45079e7ae6c8d299613246d28ebcef507ec986"] .into_iter() .map(Into::into) .collect::<Vec<String>>(); let expected = "false".to_owned(); assert_eq!(execute(command).unwrap(), expected); } }
{ match *self { Error::Ethkey(ref e) => write!(f, "{}", e), Error::FromHex(ref e) => write!(f, "{}", e), Error::ParseInt(ref e) => write!(f, "{}", e), } }
identifier_body
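The `execute` function above is the whole CLI in testable form; a brief sketch of calling it directly follows. The argument list and the expected address are taken from the brain/address tests above, while the `demo` wrapper and its name are assumptions for illustration.

fn demo() -> Result<(), Error> {
    // Same arguments as the `address` test: brain-wallet generation is
    // deterministic, so the output is the known test vector.
    let args = vec!["ethkey", "generate", "brain", "this is sparta", "--address"]
        .into_iter()
        .map(String::from);
    let address = execute(args)?;
    assert_eq!(address, "26d1ec50b4e62c1d1a40d16e7cacc6a6580757d5");
    Ok(())
}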
vec-matching-autoslice.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn main()
{ let x = [1, 2, 3]; match x { [2, _, _] => panic!(), [1, a, b] => { assert!([a, b] == [2, 3]); } [_, _, _] => panic!(), } let y = ([(1, true), (2, false)], 0.5f64); match y { ([(1, a), (b, false)], _) => { assert_eq!(a, true); assert_eq!(b, 2); } ([_, _], 0.5) => panic!(), ([_, _], _) => panic!(), } }
identifier_body
vec-matching-autoslice.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn
() { let x = [1, 2, 3]; match x { [2, _, _] => panic!(), [1, a, b] => { assert!([a, b] == [2, 3]); } [_, _, _] => panic!(), } let y = ([(1, true), (2, false)], 0.5f64); match y { ([(1, a), (b, false)], _) => { assert_eq!(a, true); assert_eq!(b, 2); } ([_, _], 0.5) => panic!(), ([_, _], _) => panic!(), } }
main
identifier_name
vec-matching-autoslice.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn main() { let x = [1, 2, 3]; match x { [2, _, _] => panic!(), [1, a, b] => { assert!([a, b] == [2, 3]); } [_, _, _] => panic!(), } let y = ([(1, true), (2, false)], 0.5f64); match y { ([(1, a), (b, false)], _) => { assert_eq!(a, true); assert_eq!(b, 2); } ([_, _], 0.5) => panic!(), ([_, _], _) => panic!(), } }
random_line_split
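The test above uses fixed-length array patterns only; the same technique generalises to slices of unknown length once subslice patterns are available. A minimal companion sketch for a modern toolchain (standalone, not part of the original test file):

fn main() {
    let v: &[i32] = &[10, 20, 30, 40];
    match v {
        // Patterns on a slice must cover every possible length, hence the
        // empty and single-element arms.
        [] => unreachable!(),
        [_single] => unreachable!(),
        [first, .., last] => {
            assert_eq!(*first, 10);
            assert_eq!(*last, 40);
        }
    }
}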
kindck-inherited-copy-bound.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that Copy bounds inherited by trait are checked. #![feature(box_syntax)] use std::any::Any; trait Foo : Copy { fn
(&self) {} } impl<T:Copy> Foo for T { } fn take_param<T:Foo>(foo: &T) { } fn a() { let x = box 3; take_param(&x); //~ ERROR `core::marker::Copy` is not implemented } fn b() { let x = box 3; let y = &x; let z = &x as &Foo; //~ ERROR `core::marker::Copy` is not implemented } fn main() { }
foo
identifier_name
kindck-inherited-copy-bound.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that Copy bounds inherited by trait are checked. #![feature(box_syntax)] use std::any::Any; trait Foo : Copy { fn foo(&self)
} impl<T:Copy> Foo for T { } fn take_param<T:Foo>(foo: &T) { } fn a() { let x = box 3; take_param(&x); //~ ERROR `core::marker::Copy` is not implemented } fn b() { let x = box 3; let y = &x; let z = &x as &Foo; //~ ERROR `core::marker::Copy` is not implemented } fn main() { }
{}
identifier_body
kindck-inherited-copy-bound.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that Copy bounds inherited by trait are checked. #![feature(box_syntax)] use std::any::Any; trait Foo : Copy { fn foo(&self) {} } impl<T:Copy> Foo for T { } fn take_param<T:Foo>(foo: &T) { } fn a() { let x = box 3; take_param(&x); //~ ERROR `core::marker::Copy` is not implemented } fn b() { let x = box 3; let y = &x; let z = &x as &Foo; //~ ERROR `core::marker::Copy` is not implemented }
fn main() { }
random_line_split
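Both `//~ ERROR` annotations above come from the same rule: the `Copy` supertrait on `Foo` propagates to every use of the trait, and `Box<T>` is never `Copy`. A small sketch of that rule without the unstable `box` syntax (the `Tag` and `Small` names are made up for illustration):

trait Tag: Copy {}

#[derive(Clone, Copy)]
struct Small(u8);
impl Tag for Small {}

fn take_param<T: Tag>(_t: &T) {}

fn main() {
    take_param(&Small(7)); // ok: Small is Copy, so it satisfies Tag's supertrait
    // take_param(&Box::new(3)); // error: `Box<{integer}>` does not implement `Copy`
}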
config.rs
/* * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use std::{convert::TryFrom, fmt, fmt::Formatter, net::SocketAddr, ops::Range}; use ipnetwork::IpNetwork; use schemars::JsonSchema; use serde::de::{self, Visitor}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::{filters::ConvertProtoConfigError, map_proto_enum}; use super::quilkin::filters::firewall::v1alpha1::{ firewall::{Action as ProtoAction, PortRange as ProtoPortRange, Rule as ProtoRule}, Firewall as ProtoConfig, }; /// Represents how a Firewall filter is configured for read and write /// operations. #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] #[non_exhaustive] pub struct Config { pub on_read: Vec<Rule>, pub on_write: Vec<Rule>, } /// Whether or not a matching [Rule] should Allow or Deny access #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] pub enum Action { /// Matching rules will allow packets through. #[serde(rename = "ALLOW")] Allow, /// Matching rules will block packets. #[serde(rename = "DENY")] Deny, } /// Combination of CIDR range, port range and action to take. #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] pub struct Rule { pub action: Action, /// ipv4 or ipv6 CIDR address. #[schemars(with = "String")] pub source: IpNetwork, pub ports: Vec<PortRange>, } impl Rule { /// Returns `true` if `address` matches the provided CIDR address as well /// as at least one of the port ranges in the [Rule]. /// /// # Examples /// ``` /// use quilkin::filters::firewall::{Action, PortRange}; /// /// let rule = quilkin::filters::firewall::Rule { /// action: Action::Allow, /// source: "192.168.75.0/24".parse().unwrap(), /// ports: vec![PortRange::new(10, 100).unwrap()], /// }; /// /// let ip = [192, 168, 75, 10]; /// assert!(rule.contains((ip, 50).into())); /// assert!(rule.contains((ip, 99).into())); /// assert!(rule.contains((ip, 10).into())); /// /// assert!(!rule.contains((ip, 5).into())); /// assert!(!rule.contains((ip, 1000).into())); /// assert!(!rule.contains(([192, 168, 76, 10], 40).into())); /// ``` pub fn contains(&self, address: SocketAddr) -> bool { if!self.source.contains(address.ip()) { return false; } self.ports .iter() .any(|range| range.contains(&address.port())) } } /// Invalid min and max values for a [PortRange]. #[derive(Debug, thiserror::Error)] pub enum PortRangeError { #[error("invalid port range: min {min:?} is greater than or equal to max {max:?}")] InvalidRange { min: u16, max: u16 }, } /// Range of matching ports that are configured against a [Rule]. #[derive(Clone, Debug, PartialEq, JsonSchema)] pub struct PortRange(Range<u16>); impl PortRange { /// Creates a new [PortRange], where min is inclusive, max is exclusive. /// [Result] will be a [PortRangeError] if `min >= max`. 
pub fn new(min: u16, max: u16) -> Result<Self, PortRangeError> {
        if min >= max {
            return Err(PortRangeError::InvalidRange { min, max });
        }

        Ok(Self(Range {
            start: min,
            end: max,
        }))
    }

    /// Returns true if the range contains the given `port`.
    pub fn contains(&self, port: &u16) -> bool {
        self.0.contains(port)
    }
}

impl Serialize for PortRange {
    /// Serialise the [PortRange] into a single value if the range covers
    /// exactly one port; otherwise, serialise it to "min-max".
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
} impl<'de> Deserialize<'de> for PortRange { /// Port ranges can be specified in yaml as either "10" as as single value /// or as "10-20" as a range, between a minimum and a maximum. /// This deserializes either format into a [PortRange]. fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { struct PortRangeVisitor; impl<'de> Visitor<'de> for PortRangeVisitor { type Value = PortRange; fn expecting(&self, f: &mut Formatter) -> Result<(), fmt::Error> { f.write_str("A port range in the format of '10' or '10-20'") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: de::Error, { match v.split_once('-') { None => { let value = v.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(value, value + 1).map_err(de::Error::custom) } Some(split) => { let start = split.0.parse::<u16>().map_err(de::Error::custom)?; let end = split.1.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(start, end).map_err(de::Error::custom) } } } } deserializer.deserialize_str(PortRangeVisitor) } } impl TryFrom<ProtoConfig> for Config { type Error = ConvertProtoConfigError; fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> { fn convert_port(range: &ProtoPortRange) -> Result<PortRange, ConvertProtoConfigError> { let min = u16::try_from(range.min).map_err(|err| { ConvertProtoConfigError::new( format!("min too large: {err}"), Some("port.min".into()), ) })?; let max = u16::try_from(range.max).map_err(|err| { ConvertProtoConfigError::new( format!("max too large: {err}"), Some("port.max".into()), ) })?; PortRange::new(min, max) .map_err(|err| ConvertProtoConfigError::new(format!("{err}"), Some("ports".into()))) } fn convert_rule(rule: &ProtoRule) -> Result<Rule, ConvertProtoConfigError> { let action = map_proto_enum!( value = rule.action, field = "policy", proto_enum_type = ProtoAction, target_enum_type = Action, variants = [Allow, Deny] )?; let source = IpNetwork::try_from(rule.source.as_str()).map_err(|err| { ConvertProtoConfigError::new( format!("invalid source: {err:?}"), Some("source".into()), ) })?; let ports = rule .ports .iter() .map(convert_port) .collect::<Result<Vec<PortRange>, ConvertProtoConfigError>>()?; Ok(Rule { action, source, ports, }) } Ok(Config { on_read: p .on_read .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, on_write: p .on_write .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, }) } } #[cfg(test)] mod tests { use super::*; #[test] fn deserialize_yaml() { let yaml = " on_read: - action: ALLOW source: 192.168.51.0/24 ports: - 10 - 1000-7000 on_write: - action: DENY source: 192.168.75.0/24 ports: - 7000 "; let config: Config = serde_yaml::from_str(yaml).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.51.0/24".parse().unwrap()); assert_eq!(2, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(11, rule1.ports[0].0.end); assert_eq!(1000, rule1.ports[1].0.start); assert_eq!(7000, rule1.ports[1].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); assert_eq!(7000, rule2.ports[0].0.start); assert_eq!(7001, rule2.ports[0].0.end); } #[test] fn portrange_contains() { let range = PortRange::new(10, 100).unwrap(); assert!(range.contains(&10)); assert!(!range.contains(&100)); assert!(range.contains(&50)); assert!(!range.contains(&200)); 
assert!(!range.contains(&5)); // single value let single = PortRange::new(10, 11).unwrap(); assert!(single.contains(&10)); assert!(!single.contains(&11)); } #[test] fn convert() { let proto_config = ProtoConfig { on_read: vec![ProtoRule { action: ProtoAction::Allow as i32, source: "192.168.75.0/24".into(), ports: vec![ProtoPortRange { min: 10, max: 100 }], }], on_write: vec![ProtoRule { action: ProtoAction::Deny as i32, source: "192.168.124.0/24".into(), ports: vec![ProtoPortRange { min: 50, max: 51 }], }], }; let config = Config::try_from(proto_config).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(100, rule1.ports[0].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.124.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); assert_eq!(50, rule2.ports[0].0.start); assert_eq!(51, rule2.ports[0].0.end); } #[test] fn rule_contains() { let rule = Rule { action: Action::Allow, source: "192.168.75.0/24".parse().unwrap(), ports: vec![PortRange::new(10, 100).unwrap()], }; let ip = [192, 168, 75, 10]; assert!(rule.contains((ip, 50).into())); assert!(rule.contains((ip, 99).into())); assert!(rule.contains((ip, 10).into())); assert!(!rule.contains((ip, 5).into())); assert!(!rule.contains((ip, 1000).into())); assert!(!rule.contains(([192, 168, 76, 10], 40).into())); } }
{ if self.0.start == (self.0.end - 1) { return serializer.serialize_str(self.0.start.to_string().as_str()); } let range = format!("{}-{}", self.0.start, self.0.end); serializer.serialize_str(range.as_str()) }
identifier_body
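Taken together, the `Serialize` body above and the matching visitor mean that "10" and "1000-7000" both land in a half-open `PortRange`. A hedged round-trip sketch, assuming the type is importable as in the doctests and that `serde_yaml` is available (the file's own tests already depend on it):

fn main() {
    let single: PortRange = serde_yaml::from_str("10").unwrap();
    assert!(single.contains(&10));
    assert!(!single.contains(&11)); // "10" becomes the half-open range 10..11

    let range: PortRange = serde_yaml::from_str("1000-7000").unwrap();
    assert!(range.contains(&1000));
    assert!(range.contains(&6999));
    assert!(!range.contains(&7000)); // max is exclusive
}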
config.rs
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use std::{convert::TryFrom, fmt, fmt::Formatter, net::SocketAddr, ops::Range};

use ipnetwork::IpNetwork;
use schemars::JsonSchema;
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use crate::{filters::ConvertProtoConfigError, map_proto_enum};

use super::quilkin::filters::firewall::v1alpha1::{
    firewall::{Action as ProtoAction, PortRange as ProtoPortRange, Rule as ProtoRule},
    Firewall as ProtoConfig,
};

/// Represents how a Firewall filter is configured for read and write
/// operations.
#[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)]
#[non_exhaustive]
pub struct Config {
    pub on_read: Vec<Rule>,
    pub on_write: Vec<Rule>,
}

/// Whether or not a matching [Rule] should Allow or Deny access
#[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)]
pub enum Action {
    /// Matching rules will allow packets through.
    #[serde(rename = "ALLOW")]
    Allow,
    /// Matching rules will block packets.
    #[serde(rename = "DENY")]
    Deny,
}

/// Combination of CIDR range, port range and action to take.
#[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)]
pub struct Rule {
    pub action: Action,
    /// ipv4 or ipv6 CIDR address.
    #[schemars(with = "String")]
    pub source: IpNetwork,
    pub ports: Vec<PortRange>,
}

impl Rule {
    /// Returns `true` if `address` matches the provided CIDR address as well
    /// as at least one of the port ranges in the [Rule].
    ///
    /// # Examples
    /// ```
    /// use quilkin::filters::firewall::{Action, PortRange};
    ///
    /// let rule = quilkin::filters::firewall::Rule {
    ///     action: Action::Allow,
    ///     source: "192.168.75.0/24".parse().unwrap(),
    ///     ports: vec![PortRange::new(10, 100).unwrap()],
    /// };
    ///
    /// let ip = [192, 168, 75, 10];
    /// assert!(rule.contains((ip, 50).into()));
    /// assert!(rule.contains((ip, 99).into()));
    /// assert!(rule.contains((ip, 10).into()));
    ///
    /// assert!(!rule.contains((ip, 5).into()));
    /// assert!(!rule.contains((ip, 1000).into()));
    /// assert!(!rule.contains(([192, 168, 76, 10], 40).into()));
    /// ```
    pub fn contains(&self, address: SocketAddr) -> bool {
        if !self.source.contains(address.ip())
self.ports .iter() .any(|range| range.contains(&address.port())) } } /// Invalid min and max values for a [PortRange]. #[derive(Debug, thiserror::Error)] pub enum PortRangeError { #[error("invalid port range: min {min:?} is greater than or equal to max {max:?}")] InvalidRange { min: u16, max: u16 }, } /// Range of matching ports that are configured against a [Rule]. #[derive(Clone, Debug, PartialEq, JsonSchema)] pub struct PortRange(Range<u16>); impl PortRange { /// Creates a new [PortRange], where min is inclusive, max is exclusive. /// [Result] will be a [PortRangeError] if `min >= max`. pub fn new(min: u16, max: u16) -> Result<Self, PortRangeError> { if min >= max { return Err(PortRangeError::InvalidRange { min, max }); } Ok(Self(Range { start: min, end: max, })) } /// Returns true if the range contain the given `port`. pub fn contains(&self, port: &u16) -> bool { self.0.contains(port) } } impl Serialize for PortRange { /// Serialise the [PortRange] into a single digit if min and max are the same /// otherwise, serialise it to "min-max". fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { if self.0.start == (self.0.end - 1) { return serializer.serialize_str(self.0.start.to_string().as_str()); } let range = format!("{}-{}", self.0.start, self.0.end); serializer.serialize_str(range.as_str()) } } impl<'de> Deserialize<'de> for PortRange { /// Port ranges can be specified in yaml as either "10" as as single value /// or as "10-20" as a range, between a minimum and a maximum. /// This deserializes either format into a [PortRange]. fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { struct PortRangeVisitor; impl<'de> Visitor<'de> for PortRangeVisitor { type Value = PortRange; fn expecting(&self, f: &mut Formatter) -> Result<(), fmt::Error> { f.write_str("A port range in the format of '10' or '10-20'") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: de::Error, { match v.split_once('-') { None => { let value = v.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(value, value + 1).map_err(de::Error::custom) } Some(split) => { let start = split.0.parse::<u16>().map_err(de::Error::custom)?; let end = split.1.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(start, end).map_err(de::Error::custom) } } } } deserializer.deserialize_str(PortRangeVisitor) } } impl TryFrom<ProtoConfig> for Config { type Error = ConvertProtoConfigError; fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> { fn convert_port(range: &ProtoPortRange) -> Result<PortRange, ConvertProtoConfigError> { let min = u16::try_from(range.min).map_err(|err| { ConvertProtoConfigError::new( format!("min too large: {err}"), Some("port.min".into()), ) })?; let max = u16::try_from(range.max).map_err(|err| { ConvertProtoConfigError::new( format!("max too large: {err}"), Some("port.max".into()), ) })?; PortRange::new(min, max) .map_err(|err| ConvertProtoConfigError::new(format!("{err}"), Some("ports".into()))) } fn convert_rule(rule: &ProtoRule) -> Result<Rule, ConvertProtoConfigError> { let action = map_proto_enum!( value = rule.action, field = "policy", proto_enum_type = ProtoAction, target_enum_type = Action, variants = [Allow, Deny] )?; let source = IpNetwork::try_from(rule.source.as_str()).map_err(|err| { ConvertProtoConfigError::new( format!("invalid source: {err:?}"), Some("source".into()), ) })?; let ports = rule .ports .iter() .map(convert_port) .collect::<Result<Vec<PortRange>, ConvertProtoConfigError>>()?; Ok(Rule { 
action, source, ports, }) } Ok(Config { on_read: p .on_read .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, on_write: p .on_write .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, }) } } #[cfg(test)] mod tests { use super::*; #[test] fn deserialize_yaml() { let yaml = " on_read: - action: ALLOW source: 192.168.51.0/24 ports: - 10 - 1000-7000 on_write: - action: DENY source: 192.168.75.0/24 ports: - 7000 "; let config: Config = serde_yaml::from_str(yaml).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.51.0/24".parse().unwrap()); assert_eq!(2, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(11, rule1.ports[0].0.end); assert_eq!(1000, rule1.ports[1].0.start); assert_eq!(7000, rule1.ports[1].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); assert_eq!(7000, rule2.ports[0].0.start); assert_eq!(7001, rule2.ports[0].0.end); } #[test] fn portrange_contains() { let range = PortRange::new(10, 100).unwrap(); assert!(range.contains(&10)); assert!(!range.contains(&100)); assert!(range.contains(&50)); assert!(!range.contains(&200)); assert!(!range.contains(&5)); // single value let single = PortRange::new(10, 11).unwrap(); assert!(single.contains(&10)); assert!(!single.contains(&11)); } #[test] fn convert() { let proto_config = ProtoConfig { on_read: vec![ProtoRule { action: ProtoAction::Allow as i32, source: "192.168.75.0/24".into(), ports: vec![ProtoPortRange { min: 10, max: 100 }], }], on_write: vec![ProtoRule { action: ProtoAction::Deny as i32, source: "192.168.124.0/24".into(), ports: vec![ProtoPortRange { min: 50, max: 51 }], }], }; let config = Config::try_from(proto_config).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(100, rule1.ports[0].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.124.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); assert_eq!(50, rule2.ports[0].0.start); assert_eq!(51, rule2.ports[0].0.end); } #[test] fn rule_contains() { let rule = Rule { action: Action::Allow, source: "192.168.75.0/24".parse().unwrap(), ports: vec![PortRange::new(10, 100).unwrap()], }; let ip = [192, 168, 75, 10]; assert!(rule.contains((ip, 50).into())); assert!(rule.contains((ip, 99).into())); assert!(rule.contains((ip, 10).into())); assert!(!rule.contains((ip, 5).into())); assert!(!rule.contains((ip, 1000).into())); assert!(!rule.contains(([192, 168, 76, 10], 40).into())); } }
{ return false; }
conditional_block
config.rs
/* * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use std::{convert::TryFrom, fmt, fmt::Formatter, net::SocketAddr, ops::Range}; use ipnetwork::IpNetwork;
use schemars::JsonSchema; use serde::de::{self, Visitor}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::{filters::ConvertProtoConfigError, map_proto_enum}; use super::quilkin::filters::firewall::v1alpha1::{ firewall::{Action as ProtoAction, PortRange as ProtoPortRange, Rule as ProtoRule}, Firewall as ProtoConfig, }; /// Represents how a Firewall filter is configured for read and write /// operations. #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] #[non_exhaustive] pub struct Config { pub on_read: Vec<Rule>, pub on_write: Vec<Rule>, } /// Whether or not a matching [Rule] should Allow or Deny access #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] pub enum Action { /// Matching rules will allow packets through. #[serde(rename = "ALLOW")] Allow, /// Matching rules will block packets. #[serde(rename = "DENY")] Deny, } /// Combination of CIDR range, port range and action to take. #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] pub struct Rule { pub action: Action, /// ipv4 or ipv6 CIDR address. #[schemars(with = "String")] pub source: IpNetwork, pub ports: Vec<PortRange>, } impl Rule { /// Returns `true` if `address` matches the provided CIDR address as well /// as at least one of the port ranges in the [Rule]. /// /// # Examples /// ``` /// use quilkin::filters::firewall::{Action, PortRange}; /// /// let rule = quilkin::filters::firewall::Rule { /// action: Action::Allow, /// source: "192.168.75.0/24".parse().unwrap(), /// ports: vec![PortRange::new(10, 100).unwrap()], /// }; /// /// let ip = [192, 168, 75, 10]; /// assert!(rule.contains((ip, 50).into())); /// assert!(rule.contains((ip, 99).into())); /// assert!(rule.contains((ip, 10).into())); /// /// assert!(!rule.contains((ip, 5).into())); /// assert!(!rule.contains((ip, 1000).into())); /// assert!(!rule.contains(([192, 168, 76, 10], 40).into())); /// ``` pub fn contains(&self, address: SocketAddr) -> bool { if!self.source.contains(address.ip()) { return false; } self.ports .iter() .any(|range| range.contains(&address.port())) } } /// Invalid min and max values for a [PortRange]. #[derive(Debug, thiserror::Error)] pub enum PortRangeError { #[error("invalid port range: min {min:?} is greater than or equal to max {max:?}")] InvalidRange { min: u16, max: u16 }, } /// Range of matching ports that are configured against a [Rule]. #[derive(Clone, Debug, PartialEq, JsonSchema)] pub struct PortRange(Range<u16>); impl PortRange { /// Creates a new [PortRange], where min is inclusive, max is exclusive. /// [Result] will be a [PortRangeError] if `min >= max`. pub fn new(min: u16, max: u16) -> Result<Self, PortRangeError> { if min >= max { return Err(PortRangeError::InvalidRange { min, max }); } Ok(Self(Range { start: min, end: max, })) } /// Returns true if the range contain the given `port`. pub fn contains(&self, port: &u16) -> bool { self.0.contains(port) } } impl Serialize for PortRange { /// Serialise the [PortRange] into a single digit if min and max are the same /// otherwise, serialise it to "min-max". 
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { if self.0.start == (self.0.end - 1) { return serializer.serialize_str(self.0.start.to_string().as_str()); } let range = format!("{}-{}", self.0.start, self.0.end); serializer.serialize_str(range.as_str()) } } impl<'de> Deserialize<'de> for PortRange { /// Port ranges can be specified in yaml as either "10" as as single value /// or as "10-20" as a range, between a minimum and a maximum. /// This deserializes either format into a [PortRange]. fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { struct PortRangeVisitor; impl<'de> Visitor<'de> for PortRangeVisitor { type Value = PortRange; fn expecting(&self, f: &mut Formatter) -> Result<(), fmt::Error> { f.write_str("A port range in the format of '10' or '10-20'") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: de::Error, { match v.split_once('-') { None => { let value = v.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(value, value + 1).map_err(de::Error::custom) } Some(split) => { let start = split.0.parse::<u16>().map_err(de::Error::custom)?; let end = split.1.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(start, end).map_err(de::Error::custom) } } } } deserializer.deserialize_str(PortRangeVisitor) } } impl TryFrom<ProtoConfig> for Config { type Error = ConvertProtoConfigError; fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> { fn convert_port(range: &ProtoPortRange) -> Result<PortRange, ConvertProtoConfigError> { let min = u16::try_from(range.min).map_err(|err| { ConvertProtoConfigError::new( format!("min too large: {err}"), Some("port.min".into()), ) })?; let max = u16::try_from(range.max).map_err(|err| { ConvertProtoConfigError::new( format!("max too large: {err}"), Some("port.max".into()), ) })?; PortRange::new(min, max) .map_err(|err| ConvertProtoConfigError::new(format!("{err}"), Some("ports".into()))) } fn convert_rule(rule: &ProtoRule) -> Result<Rule, ConvertProtoConfigError> { let action = map_proto_enum!( value = rule.action, field = "policy", proto_enum_type = ProtoAction, target_enum_type = Action, variants = [Allow, Deny] )?; let source = IpNetwork::try_from(rule.source.as_str()).map_err(|err| { ConvertProtoConfigError::new( format!("invalid source: {err:?}"), Some("source".into()), ) })?; let ports = rule .ports .iter() .map(convert_port) .collect::<Result<Vec<PortRange>, ConvertProtoConfigError>>()?; Ok(Rule { action, source, ports, }) } Ok(Config { on_read: p .on_read .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, on_write: p .on_write .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, }) } } #[cfg(test)] mod tests { use super::*; #[test] fn deserialize_yaml() { let yaml = " on_read: - action: ALLOW source: 192.168.51.0/24 ports: - 10 - 1000-7000 on_write: - action: DENY source: 192.168.75.0/24 ports: - 7000 "; let config: Config = serde_yaml::from_str(yaml).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.51.0/24".parse().unwrap()); assert_eq!(2, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(11, rule1.ports[0].0.end); assert_eq!(1000, rule1.ports[1].0.start); assert_eq!(7000, rule1.ports[1].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); 
assert_eq!(7000, rule2.ports[0].0.start); assert_eq!(7001, rule2.ports[0].0.end); } #[test] fn portrange_contains() { let range = PortRange::new(10, 100).unwrap(); assert!(range.contains(&10)); assert!(!range.contains(&100)); assert!(range.contains(&50)); assert!(!range.contains(&200)); assert!(!range.contains(&5)); // single value let single = PortRange::new(10, 11).unwrap(); assert!(single.contains(&10)); assert!(!single.contains(&11)); } #[test] fn convert() { let proto_config = ProtoConfig { on_read: vec![ProtoRule { action: ProtoAction::Allow as i32, source: "192.168.75.0/24".into(), ports: vec![ProtoPortRange { min: 10, max: 100 }], }], on_write: vec![ProtoRule { action: ProtoAction::Deny as i32, source: "192.168.124.0/24".into(), ports: vec![ProtoPortRange { min: 50, max: 51 }], }], }; let config = Config::try_from(proto_config).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(100, rule1.ports[0].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.124.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); assert_eq!(50, rule2.ports[0].0.start); assert_eq!(51, rule2.ports[0].0.end); } #[test] fn rule_contains() { let rule = Rule { action: Action::Allow, source: "192.168.75.0/24".parse().unwrap(), ports: vec![PortRange::new(10, 100).unwrap()], }; let ip = [192, 168, 75, 10]; assert!(rule.contains((ip, 50).into())); assert!(rule.contains((ip, 99).into())); assert!(rule.contains((ip, 10).into())); assert!(!rule.contains((ip, 5).into())); assert!(!rule.contains((ip, 1000).into())); assert!(!rule.contains(([192, 168, 76, 10], 40).into())); } }
random_line_split
config.rs
/* * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use std::{convert::TryFrom, fmt, fmt::Formatter, net::SocketAddr, ops::Range}; use ipnetwork::IpNetwork; use schemars::JsonSchema; use serde::de::{self, Visitor}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::{filters::ConvertProtoConfigError, map_proto_enum}; use super::quilkin::filters::firewall::v1alpha1::{ firewall::{Action as ProtoAction, PortRange as ProtoPortRange, Rule as ProtoRule}, Firewall as ProtoConfig, }; /// Represents how a Firewall filter is configured for read and write /// operations. #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] #[non_exhaustive] pub struct Config { pub on_read: Vec<Rule>, pub on_write: Vec<Rule>, } /// Whether or not a matching [Rule] should Allow or Deny access #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] pub enum Action { /// Matching rules will allow packets through. #[serde(rename = "ALLOW")] Allow, /// Matching rules will block packets. #[serde(rename = "DENY")] Deny, } /// Combination of CIDR range, port range and action to take. #[derive(Clone, Deserialize, Debug, PartialEq, Serialize, JsonSchema)] pub struct
{
    pub action: Action,
    /// ipv4 or ipv6 CIDR address.
    #[schemars(with = "String")]
    pub source: IpNetwork,
    pub ports: Vec<PortRange>,
}

impl Rule {
    /// Returns `true` if `address` matches the provided CIDR address as well
    /// as at least one of the port ranges in the [Rule].
    ///
    /// # Examples
    /// ```
    /// use quilkin::filters::firewall::{Action, PortRange};
    ///
    /// let rule = quilkin::filters::firewall::Rule {
    ///     action: Action::Allow,
    ///     source: "192.168.75.0/24".parse().unwrap(),
    ///     ports: vec![PortRange::new(10, 100).unwrap()],
    /// };
    ///
    /// let ip = [192, 168, 75, 10];
    /// assert!(rule.contains((ip, 50).into()));
    /// assert!(rule.contains((ip, 99).into()));
    /// assert!(rule.contains((ip, 10).into()));
    ///
    /// assert!(!rule.contains((ip, 5).into()));
    /// assert!(!rule.contains((ip, 1000).into()));
    /// assert!(!rule.contains(([192, 168, 76, 10], 40).into()));
    /// ```
    pub fn contains(&self, address: SocketAddr) -> bool {
        if !self.source.contains(address.ip()) {
            return false;
        }

        self.ports
            .iter()
            .any(|range| range.contains(&address.port()))
    }
}

/// Invalid min and max values for a [PortRange].
#[derive(Debug, thiserror::Error)]
pub enum PortRangeError {
    #[error("invalid port range: min {min:?} is greater than or equal to max {max:?}")]
    InvalidRange { min: u16, max: u16 },
}

/// Range of matching ports that are configured against a [Rule].
#[derive(Clone, Debug, PartialEq, JsonSchema)]
pub struct PortRange(Range<u16>);

impl PortRange {
    /// Creates a new [PortRange], where min is inclusive, max is exclusive.
    /// [Result] will be a [PortRangeError] if `min >= max`.
    pub fn new(min: u16, max: u16) -> Result<Self, PortRangeError> {
        if min >= max {
            return Err(PortRangeError::InvalidRange { min, max });
        }

        Ok(Self(Range {
            start: min,
            end: max,
        }))
    }

    /// Returns true if the range contains the given `port`.
    pub fn contains(&self, port: &u16) -> bool {
        self.0.contains(port)
    }
}

impl Serialize for PortRange {
    /// Serialise the [PortRange] into a single value if the range covers
    /// exactly one port; otherwise, serialise it to "min-max".
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if self.0.start == (self.0.end - 1) {
            return serializer.serialize_str(self.0.start.to_string().as_str());
        }

        let range = format!("{}-{}", self.0.start, self.0.end);
        serializer.serialize_str(range.as_str())
    }
}

impl<'de> Deserialize<'de> for PortRange {
    /// Port ranges can be specified in yaml as either "10" as a single value
    /// or as "10-20" as a range, between a minimum and a maximum.
    /// This deserializes either format into a [PortRange].
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { struct PortRangeVisitor; impl<'de> Visitor<'de> for PortRangeVisitor { type Value = PortRange; fn expecting(&self, f: &mut Formatter) -> Result<(), fmt::Error> { f.write_str("A port range in the format of '10' or '10-20'") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: de::Error, { match v.split_once('-') { None => { let value = v.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(value, value + 1).map_err(de::Error::custom) } Some(split) => { let start = split.0.parse::<u16>().map_err(de::Error::custom)?; let end = split.1.parse::<u16>().map_err(de::Error::custom)?; PortRange::new(start, end).map_err(de::Error::custom) } } } } deserializer.deserialize_str(PortRangeVisitor) } } impl TryFrom<ProtoConfig> for Config { type Error = ConvertProtoConfigError; fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> { fn convert_port(range: &ProtoPortRange) -> Result<PortRange, ConvertProtoConfigError> { let min = u16::try_from(range.min).map_err(|err| { ConvertProtoConfigError::new( format!("min too large: {err}"), Some("port.min".into()), ) })?; let max = u16::try_from(range.max).map_err(|err| { ConvertProtoConfigError::new( format!("max too large: {err}"), Some("port.max".into()), ) })?; PortRange::new(min, max) .map_err(|err| ConvertProtoConfigError::new(format!("{err}"), Some("ports".into()))) } fn convert_rule(rule: &ProtoRule) -> Result<Rule, ConvertProtoConfigError> { let action = map_proto_enum!( value = rule.action, field = "policy", proto_enum_type = ProtoAction, target_enum_type = Action, variants = [Allow, Deny] )?; let source = IpNetwork::try_from(rule.source.as_str()).map_err(|err| { ConvertProtoConfigError::new( format!("invalid source: {err:?}"), Some("source".into()), ) })?; let ports = rule .ports .iter() .map(convert_port) .collect::<Result<Vec<PortRange>, ConvertProtoConfigError>>()?; Ok(Rule { action, source, ports, }) } Ok(Config { on_read: p .on_read .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, on_write: p .on_write .iter() .map(convert_rule) .collect::<Result<Vec<Rule>, ConvertProtoConfigError>>()?, }) } } #[cfg(test)] mod tests { use super::*; #[test] fn deserialize_yaml() { let yaml = " on_read: - action: ALLOW source: 192.168.51.0/24 ports: - 10 - 1000-7000 on_write: - action: DENY source: 192.168.75.0/24 ports: - 7000 "; let config: Config = serde_yaml::from_str(yaml).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.51.0/24".parse().unwrap()); assert_eq!(2, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(11, rule1.ports[0].0.end); assert_eq!(1000, rule1.ports[1].0.start); assert_eq!(7000, rule1.ports[1].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); assert_eq!(7000, rule2.ports[0].0.start); assert_eq!(7001, rule2.ports[0].0.end); } #[test] fn portrange_contains() { let range = PortRange::new(10, 100).unwrap(); assert!(range.contains(&10)); assert!(!range.contains(&100)); assert!(range.contains(&50)); assert!(!range.contains(&200)); assert!(!range.contains(&5)); // single value let single = PortRange::new(10, 11).unwrap(); assert!(single.contains(&10)); assert!(!single.contains(&11)); } #[test] fn convert() { let proto_config = ProtoConfig { on_read: vec![ProtoRule { action: 
ProtoAction::Allow as i32, source: "192.168.75.0/24".into(), ports: vec![ProtoPortRange { min: 10, max: 100 }], }], on_write: vec![ProtoRule { action: ProtoAction::Deny as i32, source: "192.168.124.0/24".into(), ports: vec![ProtoPortRange { min: 50, max: 51 }], }], }; let config = Config::try_from(proto_config).unwrap(); let rule1 = config.on_read[0].clone(); assert_eq!(rule1.action, Action::Allow); assert_eq!(rule1.source, "192.168.75.0/24".parse().unwrap()); assert_eq!(1, rule1.ports.len()); assert_eq!(10, rule1.ports[0].0.start); assert_eq!(100, rule1.ports[0].0.end); let rule2 = config.on_write[0].clone(); assert_eq!(rule2.action, Action::Deny); assert_eq!(rule2.source, "192.168.124.0/24".parse().unwrap()); assert_eq!(1, rule2.ports.len()); assert_eq!(50, rule2.ports[0].0.start); assert_eq!(51, rule2.ports[0].0.end); } #[test] fn rule_contains() { let rule = Rule { action: Action::Allow, source: "192.168.75.0/24".parse().unwrap(), ports: vec![PortRange::new(10, 100).unwrap()], }; let ip = [192, 168, 75, 10]; assert!(rule.contains((ip, 50).into())); assert!(rule.contains((ip, 99).into())); assert!(rule.contains((ip, 10).into())); assert!(!rule.contains((ip, 5).into())); assert!(!rule.contains((ip, 1000).into())); assert!(!rule.contains(([192, 168, 76, 10], 40).into())); } }
Rule
identifier_name
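An aside on the "10" vs "10-20" convention in the entry above: the following is a minimal, self-contained sketch of the same parsing rule outside of serde. The function name `parse_port_range` is hypothetical and not part of quilkin's API; it only illustrates how a single value becomes a half-open one-port range.

```rust
use std::ops::Range;

// Hypothetical re-implementation of the rule used by `Deserialize for
// PortRange` above: "10" becomes the half-open range 10..11, and "10-20"
// becomes 10..20 (max exclusive).
fn parse_port_range(v: &str) -> Result<Range<u16>, String> {
    match v.split_once('-') {
        // Single value: a one-port range with an exclusive end.
        None => {
            let value = v.parse::<u16>().map_err(|e| e.to_string())?;
            // Note: like the original, this overflows for port 65535,
            // since the exclusive end would need to be 65536.
            Ok(value..value + 1)
        }
        // "min-max" form.
        Some((min, max)) => {
            let start = min.parse::<u16>().map_err(|e| e.to_string())?;
            let end = max.parse::<u16>().map_err(|e| e.to_string())?;
            if start >= end {
                return Err(format!("min {start} >= max {end}"));
            }
            Ok(start..end)
        }
    }
}

fn main() {
    assert_eq!(parse_port_range("10"), Ok(10..11));
    assert_eq!(parse_port_range("1000-7000"), Ok(1000..7000));
    assert!(parse_port_range("20-10").is_err());
}
```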
lib.rs
from the address of the struct.
macro_rules! offset_of {
    ($container:path, $field:ident) => {{
        // Make sure the field actually exists. This line ensures that a compile-time error is
        // generated if $field is accessed through a Deref impl.
        let $container { $field: _, .. };

        // Create an (invalid) instance of the container and calculate the offset to its
        // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to
        // be nullptr deref.
        let invalid: $container = ::std::mem::uninitialized();
        let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize;

        // Do not run destructors on the made up invalid instance.
        ::std::mem::forget(invalid);
        offset as isize
    }};
}

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// Wrapper type for pointers to get the non-zero optimization. When
/// NonZero/Shared/Unique are stabilized, we should just use Shared
/// here to get the same effect. Gankro is working on this in [1].
///
/// It's unfortunate that this needs to infect all the caller types
/// with 'static. It would be nice to just use a &() and a PhantomData<T>
/// instead, but then the compiler can't determine whether the &() should
/// be thin or fat (which depends on whether or not T is sized). Given
/// that this is all a temporary hack, this restriction is fine for now.
///
/// [1] https://github.com/rust-lang/rust/issues/27730
pub struct NonZeroPtrMut<T: ?Sized + 'static>(&'static mut T);

impl<T: ?Sized> NonZeroPtrMut<T> {
    pub fn new(ptr: *mut T) -> Self {
        assert!(!(ptr as *mut u8).is_null());
        NonZeroPtrMut(unsafe { mem::transmute(ptr) })
    }

    pub fn ptr(&self) -> *mut T {
        self.0 as *const T as *mut T
    }
}

impl<T: ?Sized + 'static> Clone for NonZeroPtrMut<T> {
    fn clone(&self) -> Self {
        NonZeroPtrMut::new(self.ptr())
    }
}

impl<T: ?Sized + 'static> fmt::Pointer for NonZeroPtrMut<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: ?Sized + 'static> fmt::Debug for NonZeroPtrMut<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        <Self as fmt::Pointer>::fmt(self, f)
    }
}

impl<T: ?Sized + 'static> PartialEq for NonZeroPtrMut<T> {
    fn eq(&self, other: &Self) -> bool {
        self.ptr() == other.ptr()
    }
}

impl<T: ?Sized + 'static> Eq for NonZeroPtrMut<T> {}

pub struct Arc<T: ?Sized + 'static> {
    p: NonZeroPtrMut<ArcInner<T>>,
}

/// An Arc that is known to be uniquely owned
///
/// This lets us build arcs that we can mutate before
/// freezing, without needing to change the allocation
pub struct UniqueArc<T: ?Sized + 'static>(Arc<T>);

impl<T> UniqueArc<T> {
    #[inline]
    /// Construct a new UniqueArc
    pub fn new(data: T) -> Self {
        UniqueArc(Arc::new(data))
    }

    #[inline]
    /// Convert to a shareable Arc<T> once we're done using it
    pub fn shareable(self) -> Arc<T> {
        self.0
    }
}

impl<T> Deref for UniqueArc<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &*self.0
    }
}

impl<T> DerefMut for UniqueArc<T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know this to be uniquely owned
        unsafe { &mut (*self.0.ptr()).data }
    }
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

struct ArcInner<T: ?Sized> {
    count: atomic::AtomicUsize,
    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    #[inline]
    pub fn
new(data: T) -> Self {
        let x = Box::new(ArcInner {
            count: atomic::AtomicUsize::new(1),
            data: data,
        });
        Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) }
    }

    pub fn into_raw(this: Self) -> *const T {
        let ptr = unsafe { &((*this.ptr()).data) as *const _ };
        mem::forget(this);
        ptr
    }

    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.
        let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
        Arc {
            p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>),
        }
    }
}

impl<T: ?Sized> Arc<T> {
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    // Non-inlined part of `drop`. Just invokes the destructor.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let _ = Box::from_raw(self.ptr());
    }

    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr() == other.ptr()
    }

    fn ptr(&self) -> *mut ArcInner<T> {
        self.p.ptr()
    }
}

impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().count.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            process::abort();
        }

        Arc { p: NonZeroPtrMut::new(self.ptr()) }
    }
}

impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    #[inline]
    pub fn make_mut(this: &mut Self) -> &mut T {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new((**this).clone());
        }

        unsafe {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            &mut (*this.ptr()).data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    #[inline]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique()
else {
            None
        }
    }

    #[inline]
    fn is_unique(&self) -> bool {
        // We can use Relaxed here, but the justification is a bit subtle.
        //
        // The reason to use Acquire would be to synchronize with other threads
        // that are modifying the refcount with Release, i.e. to ensure that
        // their writes to memory guarded by this refcount are flushed. However,
        // we know that threads only modify the contents of the Arc when they
        // observe the refcount to be 1, and no other thread could observe that
        // because we're holding one strong reference here.
        self.inner().count.load(Relaxed) == 1
    }
}

impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously have happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: https://github.com/rust-lang/rust/pull/41714
        self.inner().count.load(Acquire);
        unsafe {
            self.drop_slow();
        }
    }
}

impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        *(*self) == *(*other)
    }

    fn ne(&self, other: &Arc<T>) -> bool {
        *(*self) != *(*other)
    }
}

impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: ?Sized + Eq> Eq for Arc<T> {}

impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: Default> Default for Arc<T> {
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

// This is what the HeapSize crate does for regular
arc, but is questionably
// sound. See https://github.com/servo/heapsize/issues/37
#[cfg(feature = "servo")]
impl<T: HeapSizeOf> HeapSizeOf for Arc<T> {
    fn heap_size_of_children(&self) -> usize {
        (**self).heap_size_of_children()
    }
}

#[cfg(feature = "servo")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> {
    fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
    where
        D: ::serde::de::Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Arc::new)
    }
}

#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ::serde::ser::Serializer,
    {
        (**self).serialize(serializer)
    }
}

/// Structure to allow Arc-managing some fixed-sized data and a variably-sized
/// slice in a single allocation.
#[derive(Debug, Eq, PartialEq, PartialOrd)]
pub struct HeaderSlice<H, T: ?Sized> {
    /// The fixed-sized data.
    pub header: H,

    /// The dynamically-sized data.
    pub slice: T,
}

#[inline(always)]
fn divide_rounding_up(dividend: usize, divisor: usize) -> usize {
    (dividend + divisor - 1) / divisor
}

impl<H, T> Arc<HeaderSlice<H, [T]>> {
    /// Creates an Arc for a HeaderSlice using the given header struct and
    /// iterator to generate the slice. The resulting Arc will be fat.
    #[inline]
    pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self
        where I: Iterator<Item = T> + ExactSizeIterator
    {
        use ::std::mem::size_of;
        assert!(size_of::<T>() != 0, "Need to think about ZST");

        // Compute the required size for the allocation.
        let num_items = items.len();
        let size = {
            // First, determine the alignment of a hypothetical pointer to a
            // HeaderSlice.
            let fake_slice_ptr_align: usize = mem::align_of::<ArcInner<HeaderSlice<H, [T; 1]>>>();

            // Next, synthesize a totally garbage (but properly aligned) pointer
            // to a sequence of T.
            let fake_slice_ptr = fake_slice_ptr_align as *const T;

            // Convert that sequence to a fat pointer. The address component of
            // the fat pointer will be garbage, but the length will be correct.
            let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) };

            // Pretend the garbage address points to our allocation target (with
            // a trailing sequence of T), rather than just a sequence of T.
            let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>;
            let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr };

            // Use size_of_val, which will combine static information about the
            // type with the length from the fat pointer. The garbage address
            // will not be used.
            mem::size_of_val(fake_ref)
        };

        let ptr: *mut ArcInner<HeaderSlice<H, [T]>>;
        unsafe {
            // Allocate the buffer. We use Vec because the underlying allocation
            // machinery isn't available in stable Rust.
            //
            // To avoid alignment issues, we allocate words rather than bytes,
            // rounding up to the nearest word size.
            let buffer = if mem::align_of::<T>() <= mem::align_of::<usize>() {
                Self::allocate_buffer::<usize>(size)
            } else if mem::align_of::<T>() <= mem::align_of::<u64>() {
                // On 32-bit platforms <T> may have 8 byte alignment while usize has 4 byte alignment.
                // Use u64 to avoid over-alignment.
                // This branch will compile away in optimized builds.
                Self::allocate_buffer::<u64>(size)
            } else {
                panic!("Over-aligned type not handled");
            };

            // Synthesize the fat pointer. We do this by claiming we have a direct
            // pointer to a [T], and then changing the type of the borrow.
The key
            // point here is that the length portion of the fat pointer applies
            // only to the number of elements in the dynamically-sized portion of
            // the type, so the value will be the same whether it points to a [T]
            // or something else with a [T] as its last member.
            let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items);
            ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>;

            // Write the data.
            //
            // Note that any panics here (i.e. from the iterator) are safe, since
            // we'll just leak the uninitialized memory.
            ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1));
            ptr::write(&mut ((*ptr).data.header), header);
            let mut current: *mut T = &mut (*ptr).data.slice[0];
            for _ in 0..num_items {
                ptr::write(current, items.next().expect("ExactSizeIterator over-reported length"));
                current = current.offset(1);
            }
            assert!(items.next().is_none(), "ExactSizeIterator under-reported length");

            // We should have consumed the buffer exactly.
            debug_assert!(current as *mut u8 == buffer.offset(size as isize));
        }

        // Return the fat Arc.
        assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat");
        Arc { p: NonZeroPtrMut::new(ptr) }
    }

    #[inline]
    unsafe fn allocate_buffer<W>(size: usize) -> *mut u8 {
        let words_to_allocate = divide_rounding_up(size, mem::size_of::<W>());
        let mut vec = Vec::<W>::with_capacity(words_to_allocate);
        vec.set_len(words_to_allocate);
        Box::into_raw(vec.into_boxed_slice()) as *mut W as *mut u8
    }
}

/// Header data with an inline length. Consumers that use HeaderWithLength as the
/// Header type in HeaderSlice can take advantage of ThinArc.
#[derive(Debug, Eq, PartialEq, PartialOrd)]
pub struct HeaderWithLength<H> {
    /// The fixed-sized data.
    pub header: H,

    /// The slice length.
    length: usize,
}

impl<H> HeaderWithLength<H> {
    /// Creates a new HeaderWithLength.
    pub fn new(header: H, length: usize) -> Self {
        HeaderWithLength {
            header: header,
            length: length,
        }
    }
}

type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>;

pub struct ThinArc<H: 'static, T: 'static> {
    ptr: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>,
}

unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}

// Synthesize a fat pointer from a thin pointer.
//
// See the comment around the analogous operation in from_header_and_iter.
fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>)
    -> *mut ArcInner<HeaderSliceWithLength<H, [T]>>
{
    let len = unsafe { (*thin).data.header.length };
    let fake_slice: *mut [T] = unsafe {
        slice::from_raw_parts_mut(thin as *mut T, len)
    };

    fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>>
}

impl<H: 'static, T: 'static> ThinArc<H, T> {
    /// Temporarily converts |self| into a bona fide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline(always)]
    pub fn with_arc<F, U>(&self, f: F) -> U
        where F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        let transient = NoDrop::new(Arc {
            p: NonZeroPtrMut::new(thin_to_thick(self.ptr))
        });

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forget the transient Arc to leave the refcount untouched.
        mem::forget(transient);

        // Forward the result.
result
    }
}

impl<H, T> Deref for ThinArc<H, T> {
    type Target = HeaderSliceWithLength<H, [T]>;
    fn deref(&self) -> &Self::Target {
        unsafe { &(*thin_to_thick(self.ptr)).data }
    }
}

impl<H: 'static, T: 'static> Clone for ThinArc<H, T
{ unsafe { // See make_mut() for documentation of the threadsafety here. Some(&mut (*this.ptr()).data) } }
conditional_block
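The `clone` path in the entry above pairs a Relaxed `fetch_add` with an abort once the old count exceeds `MAX_REFCOUNT`. Below is a minimal standalone sketch of that guard using only `std` atomics, to show why the check reads the *previous* value; the `RefCount` type here is illustrative and not part of the crate.

```rust
use std::process;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

const MAX_REFCOUNT: usize = isize::MAX as usize;

struct RefCount(AtomicUsize);

impl RefCount {
    fn new() -> Self {
        RefCount(AtomicUsize::new(1))
    }

    fn increment(&self) {
        // Relaxed is enough: a new reference can only be created from an
        // existing one, which already provides the needed synchronization.
        let old = self.0.fetch_add(1, Relaxed);

        // `fetch_add` returns the value *before* the add, so `old` greater
        // than MAX_REFCOUNT means the count had already crossed the limit
        // before this increment. Several threads may race past the check
        // before any of them aborts, which is why the limit is "soft".
        if old > MAX_REFCOUNT {
            process::abort();
        }
    }
}

fn main() {
    let rc = RefCount::new();
    rc.increment(); // count is now 2, far below the limit
}
```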
lib.rs
in bytes from the address of the struct. macro_rules! offset_of { ($container:path, $field:ident) => {{ // Make sure the field actually exists. This line ensures that a compile-time error is // generated if $field is accessed through a Deref impl. let $container { $field: _,.. }; // Create an (invalid) instance of the container and calculate the offset to its // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to // be nullptr deref. let invalid: $container = ::std::mem::uninitialized(); let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize; // Do not run destructors on the made up invalid instance. ::std::mem::forget(invalid); offset as isize }}; } /// A soft limit on the amount of references that may be made to an `Arc`. /// /// Going above this limit will abort your program (although not /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// Wrapper type for pointers to get the non-zero optimization. When /// NonZero/Shared/Unique are stabilized, we should just use Shared /// here to get the same effect. Gankro is working on this in [1]. /// /// It's unfortunate that this needs to infect all the caller types /// with'static. It would be nice to just use a &() and a PhantomData<T> /// instead, but then the compiler can't determine whether the &() should /// be thin or fat (which depends on whether or not T is sized). Given /// that this is all a temporary hack, this restriction is fine for now. /// /// [1] https://github.com/rust-lang/rust/issues/27730 pub struct NonZeroPtrMut<T:?Sized +'static>(&'static mut T); impl<T:?Sized> NonZeroPtrMut<T> { pub fn new(ptr: *mut T) -> Self { assert!(!(ptr as *mut u8).is_null()); NonZeroPtrMut(unsafe { mem::transmute(ptr) }) } pub fn ptr(&self) -> *mut T { self.0 as *const T as *mut T } } impl<T:?Sized +'static> Clone for NonZeroPtrMut<T> { fn clone(&self) -> Self { NonZeroPtrMut::new(self.ptr()) } } impl<T:?Sized +'static> fmt::Pointer for NonZeroPtrMut<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T:?Sized +'static> fmt::Debug for NonZeroPtrMut<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Pointer>::fmt(self, f) } } impl<T:?Sized +'static> PartialEq for NonZeroPtrMut<T> { fn eq(&self, other: &Self) -> bool { self.ptr() == other.ptr() } } impl<T:?Sized +'static> Eq for NonZeroPtrMut<T> {} pub struct Arc<T:?Sized +'static> { p: NonZeroPtrMut<ArcInner<T>>, } /// An Arc that is known to be uniquely owned /// /// This lets us build arcs that we can mutate before /// freezing, without needing to change the allocation pub struct UniqueArc<T:?Sized +'static>(Arc<T>); impl<T> UniqueArc<T> { #[inline] /// Construct a new UniqueArc pub fn new(data: T) -> Self { UniqueArc(Arc::new(data)) } #[inline] /// Convert to a shareable Arc<T> once we're done using it pub fn shareable(self) -> Arc<T> { self.0 } } impl<T> Deref for UniqueArc<T> { type Target = T; fn deref(&self) -> &T { &*self.0 } } impl<T> DerefMut for UniqueArc<T> { fn deref_mut(&mut self) -> &mut T { // We know this to be uniquely owned unsafe { &mut (*self.0.ptr()).data } } } unsafe impl<T:?Sized + Sync + Send> Send for Arc<T> {} unsafe impl<T:?Sized + Sync + Send> Sync for Arc<T> {} struct ArcInner<T:?Sized> { count: atomic::AtomicUsize, data: T, } unsafe impl<T:?Sized + Sync + Send> Send for ArcInner<T> {} unsafe impl<T:?Sized + Sync + Send> Sync for ArcInner<T> {} impl<T> Arc<T> { #[inline] 
pub fn new(data: T) -> Self { let x = Box::new(ArcInner { count: atomic::AtomicUsize::new(1), data: data, }); Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) } } pub fn into_raw(this: Self) -> *const T { let ptr = unsafe { &((*this.ptr()).data) as *const _ }; mem::forget(this); ptr } pub unsafe fn from_raw(ptr: *const T) -> Self { // To find the corresponding pointer to the `ArcInner` we need // to subtract the offset of the `data` field from the pointer. let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data)); Arc { p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>), } } } impl<T:?Sized> Arc<T> { #[inline] fn inner(&self) -> &ArcInner<T> { // This unsafety is ok because while this arc is alive we're guaranteed // that the inner pointer is valid. Furthermore, we know that the // `ArcInner` structure itself is `Sync` because the inner data is // `Sync` as well, so we're ok loaning out an immutable pointer to these // contents. unsafe { &*self.ptr() } } // Non-inlined part of `drop`. Just invokes the destructor. #[inline(never)] unsafe fn drop_slow(&mut self) { let _ = Box::from_raw(self.ptr()); } #[inline] pub fn ptr_eq(this: &Self, other: &Self) -> bool { this.ptr() == other.ptr() } fn ptr(&self) -> *mut ArcInner<T> { self.p.ptr() } } impl<T:?Sized> Clone for Arc<T> { #[inline] fn clone(&self) -> Self { // Using a relaxed ordering is alright here, as knowledge of the // original reference prevents other threads from erroneously deleting // the object. // // As explained in the [Boost documentation][1], Increasing the // reference counter can always be done with memory_order_relaxed: New // references to an object can only be formed from an existing // reference, and passing an existing reference from one thread to // another must already provide any required synchronization. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) let old_size = self.inner().count.fetch_add(1, Relaxed); // However we need to guard against massive refcounts in case someone // is `mem::forget`ing Arcs. If we don't do this the count can overflow // and users will use-after free. We racily saturate to `isize::MAX` on // the assumption that there aren't ~2 billion threads incrementing // the reference count at once. This branch will never be taken in // any realistic program. // // We abort because such a program is incredibly degenerate, and we // don't care to support it. if old_size > MAX_REFCOUNT { process::abort(); } Arc { p: NonZeroPtrMut::new(self.ptr()) } } } impl<T:?Sized> Deref for Arc<T> { type Target = T; #[inline] fn deref(&self) -> &T { &self.inner().data } } impl<T: Clone> Arc<T> { #[inline] pub fn make_mut(this: &mut Self) -> &mut T { if!this.is_unique() { // Another pointer exists; clone *this = Arc::new((**this).clone()); } unsafe { // This unsafety is ok because we're guaranteed that the pointer // returned is the *only* pointer that will ever be returned to T. Our // reference count is guaranteed to be 1 at this point, and we required // the Arc itself to be `mut`, so we're returning the only possible // reference to the inner data. &mut (*this.ptr()).data } } } impl<T:?Sized> Arc<T> { #[inline] pub fn get_mut(this: &mut Self) -> Option<&mut T> { if this.is_unique() { unsafe { // See make_mut() for documentation of the threadsafety here. Some(&mut (*this.ptr()).data) } } else { None } } #[inline] fn is_unique(&self) -> bool { // We can use Relaxed here, but the justification is a bit subtle. 
// // The reason to use Acquire would be to synchronize with other threads // that are modifying the refcount with Release, i.e. to ensure that // their writes to memory guarded by this refcount are flushed. However, // we know that threads only modify the contents of the Arc when they // observe the refcount to be 1, and no other thread could observe that // because we're holding one strong reference here. self.inner().count.load(Relaxed) == 1 } } impl<T:?Sized> Drop for Arc<T> { #[inline] fn drop(&mut self) { // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. if self.inner().count.fetch_sub(1, Release)!= 1 { return; } // FIXME(bholley): Use the updated comment when [2] is merged. // // This load is needed to prevent reordering of use of the data and // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` load. This // means that use of the data happens before decreasing the reference // count, which happens before this load, which happens before the // deletion of the data. // // As explained in the [Boost documentation][1], // // > It is important to enforce any possible access to the object in one // > thread (through an existing reference) to *happen before* deleting // > the object in a different thread. This is achieved by a "release" // > operation after dropping a reference (any access to the object // > through this reference must obviously happened before), and an // > "acquire" operation before deleting the object. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) // [2]: https://github.com/rust-lang/rust/pull/41714 self.inner().count.load(Acquire); unsafe { self.drop_slow(); } } } impl<T:?Sized + PartialEq> PartialEq for Arc<T> { fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) } fn ne(&self, other: &Arc<T>) -> bool { *(*self)!= *(*other) } } impl<T:?Sized + PartialOrd> PartialOrd for Arc<T> { fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { (**self).partial_cmp(&**other) } fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) } fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) } fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) } fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) } } impl<T:?Sized + Ord> Ord for Arc<T> { fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) } } impl<T:?Sized + Eq> Eq for Arc<T> {} impl<T:?Sized + fmt::Display> fmt::Display for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&**self, f) } } impl<T:?Sized + fmt::Debug> fmt::Debug for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<T:?Sized> fmt::Pointer for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T: Default> Default for Arc<T> { fn default() -> Arc<T> { Arc::new(Default::default()) } } impl<T:?Sized + Hash> Hash for Arc<T> { fn hash<H: Hasher>(&self, state: &mut H) { (**self).hash(state) } } impl<T> From<T> for Arc<T> { fn from(t: T) -> Self { Arc::new(t) } } impl<T:?Sized> borrow::Borrow<T> for Arc<T> { fn borrow(&self) -> &T { &**self } } impl<T:?Sized> AsRef<T> for Arc<T> { fn as_ref(&self) -> &T { &**self } } // This is what the HeapSize crate does for regular arc, but is questionably // sound. 
See https://github.com/servo/heapsize/issues/37 #[cfg(feature = "servo")] impl<T: HeapSizeOf> HeapSizeOf for Arc<T> { fn heap_size_of_children(&self) -> usize { (**self).heap_size_of_children() } } #[cfg(feature = "servo")] impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> { fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error> where D: ::serde::de::Deserializer<'de>, { T::deserialize(deserializer).map(Arc::new) } } #[cfg(feature = "servo")] impl<T: Serialize> Serialize for Arc<T> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ::serde::ser::Serializer, { (**self).serialize(serializer) } } /// Structure to allow Arc-managing some fixed-sized data and a variably-sized /// slice in a single allocation. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderSlice<H, T:?Sized> { /// The fixed-sized data. pub header: H, /// The dynamically-sized data. pub slice: T, } #[inline(always)] fn divide_rounding_up(dividend: usize, divisor: usize) -> usize { (dividend + divisor - 1) / divisor } impl<H, T> Arc<HeaderSlice<H, [T]>> { /// Creates an Arc for a HeaderSlice using the given header struct and /// iterator to generate the slice. The resulting Arc will be fat. #[inline] pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self where I: Iterator<Item=T> + ExactSizeIterator { use ::std::mem::size_of; assert!(size_of::<T>()!= 0, "Need to think about ZST"); // Compute the required size for the allocation. let num_items = items.len(); let size = { // First, determine the alignment of a hypothetical pointer to a // HeaderSlice. let fake_slice_ptr_align: usize = mem::align_of::<ArcInner<HeaderSlice<H, [T; 1]>>>(); // Next, synthesize a totally garbage (but properly aligned) pointer // to a sequence of T. let fake_slice_ptr = fake_slice_ptr_align as *const T; // Convert that sequence to a fat pointer. The address component of // the fat pointer will be garbage, but the length will be correct. let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) }; // Pretend the garbage address points to our allocation target (with // a trailing sequence of T), rather than just a sequence of T. let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>; let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr }; // Use size_of_val, which will combine static information about the // type with the length from the fat pointer. The garbage address // will not be used. mem::size_of_val(fake_ref) }; let ptr: *mut ArcInner<HeaderSlice<H, [T]>>; unsafe { // Allocate the buffer. We use Vec because the underlying allocation // machinery isn't available in stable Rust. // // To avoid alignment issues, we allocate words rather than bytes, // rounding up to the nearest word size. let buffer = if mem::align_of::<T>() <= mem::align_of::<usize>() { Self::allocate_buffer::<usize>(size) } else if mem::align_of::<T>() <= mem::align_of::<u64>() { // On 32-bit platforms <T> may have 8 byte alignment while usize has 4 byte aligment. // Use u64 to avoid over-alignment. // This branch will compile away in optimized builds. Self::allocate_buffer::<u64>(size) } else { panic!("Over-aligned type not handled"); }; // Synthesize the fat pointer. We do this by claiming we have a direct // pointer to a [T], and then changing the type of the borrow. 
The key // point here is that the length portion of the fat pointer applies // only to the number of elements in the dynamically-sized portion of // the type, so the value will be the same whether it points to a [T] // or something else with a [T] as its last member. let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items); ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>; // Write the data. // // Note that any panics here (i.e. from the iterator) are safe, since // we'll just leak the uninitialized memory. ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1)); ptr::write(&mut ((*ptr).data.header), header); let mut current: *mut T = &mut (*ptr).data.slice[0]; for _ in 0..num_items { ptr::write(current, items.next().expect("ExactSizeIterator over-reported length")); current = current.offset(1); } assert!(items.next().is_none(), "ExactSizeIterator under-reported length"); // We should have consumed the buffer exactly. debug_assert!(current as *mut u8 == buffer.offset(size as isize)); } // Return the fat Arc. assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat"); Arc { p: NonZeroPtrMut::new(ptr) } } #[inline] unsafe fn allocate_buffer<W>(size: usize) -> *mut u8 { let words_to_allocate = divide_rounding_up(size, mem::size_of::<W>()); let mut vec = Vec::<W>::with_capacity(words_to_allocate); vec.set_len(words_to_allocate); Box::into_raw(vec.into_boxed_slice()) as *mut W as *mut u8 } } /// Header data with an inline length. Consumers that use HeaderWithLength as the /// Header type in HeaderSlice can take advantage of ThinArc. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderWithLength<H> { /// The fixed-sized data. pub header: H,
impl<H> HeaderWithLength<H> {
    /// Creates a new HeaderWithLength.
    pub fn new(header: H, length: usize) -> Self {
        HeaderWithLength {
            header: header,
            length: length,
        }
    }
}

type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>;

pub struct ThinArc<H: 'static, T: 'static> {
    ptr: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>,
}

unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}

// Synthesize a fat pointer from a thin pointer.
//
// See the comment around the analogous operation in from_header_and_iter.
fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>)
    -> *mut ArcInner<HeaderSliceWithLength<H, [T]>>
{
    let len = unsafe { (*thin).data.header.length };
    let fake_slice: *mut [T] = unsafe {
        slice::from_raw_parts_mut(thin as *mut T, len)
    };

    fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>>
}

impl<H: 'static, T: 'static> ThinArc<H, T> {
    /// Temporarily converts |self| into a bona fide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline(always)]
    pub fn with_arc<F, U>(&self, f: F) -> U
        where F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        let transient = NoDrop::new(Arc {
            p: NonZeroPtrMut::new(thin_to_thick(self.ptr))
        });

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forget the transient Arc to leave the refcount untouched.
        mem::forget(transient);

        // Forward the result.
        result
    }
}

impl<H, T> Deref for ThinArc<H, T> {
    type Target = HeaderSliceWithLength<H, [T]>;
    fn deref(&self) -> &Self::Target {
        unsafe { &(*thin_to_thick(self.ptr)).data }
    }
}

impl<H: 'static, T: 'static> Clone for ThinArc<H, T
/// The slice length. length: usize, }
random_line_split
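The `offset_of!` macro in these `lib.rs` entries predates `MaybeUninit`; `mem::uninitialized()` is deprecated, and materializing an invalid value is now considered UB for many types. As a point of comparison (not a change to the entries above), here is a sketch using the built-in that computes the same offset safely, assuming Rust 1.77+ where `core::mem::offset_of!` is stable.

```rust
use std::mem::offset_of;
use std::sync::atomic::AtomicUsize;

// Same shape as the ArcInner in the entries above.
struct ArcInner<T> {
    count: AtomicUsize,
    data: T,
}

fn main() {
    // Computed at compile time, with no invalid value ever materialized.
    let off = offset_of!(ArcInner<u64>, data);

    // The offset of `data` must be at least the size of the count field
    // (padding may make it larger, depending on T's alignment).
    assert!(off >= std::mem::size_of::<AtomicUsize>());
    println!("data lives {off} bytes into ArcInner<u64>");
}
```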
lib.rs
from the address of the struct. macro_rules! offset_of { ($container:path, $field:ident) => {{ // Make sure the field actually exists. This line ensures that a compile-time error is // generated if $field is accessed through a Deref impl. let $container { $field: _,.. }; // Create an (invalid) instance of the container and calculate the offset to its // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to // be nullptr deref. let invalid: $container = ::std::mem::uninitialized(); let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize; // Do not run destructors on the made up invalid instance. ::std::mem::forget(invalid); offset as isize }}; } /// A soft limit on the amount of references that may be made to an `Arc`. /// /// Going above this limit will abort your program (although not /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// Wrapper type for pointers to get the non-zero optimization. When /// NonZero/Shared/Unique are stabilized, we should just use Shared /// here to get the same effect. Gankro is working on this in [1]. /// /// It's unfortunate that this needs to infect all the caller types /// with'static. It would be nice to just use a &() and a PhantomData<T> /// instead, but then the compiler can't determine whether the &() should /// be thin or fat (which depends on whether or not T is sized). Given /// that this is all a temporary hack, this restriction is fine for now. /// /// [1] https://github.com/rust-lang/rust/issues/27730 pub struct NonZeroPtrMut<T:?Sized +'static>(&'static mut T); impl<T:?Sized> NonZeroPtrMut<T> { pub fn new(ptr: *mut T) -> Self { assert!(!(ptr as *mut u8).is_null()); NonZeroPtrMut(unsafe { mem::transmute(ptr) }) } pub fn ptr(&self) -> *mut T { self.0 as *const T as *mut T } } impl<T:?Sized +'static> Clone for NonZeroPtrMut<T> { fn clone(&self) -> Self { NonZeroPtrMut::new(self.ptr()) } } impl<T:?Sized +'static> fmt::Pointer for NonZeroPtrMut<T> { fn
(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T:?Sized +'static> fmt::Debug for NonZeroPtrMut<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Pointer>::fmt(self, f) } } impl<T:?Sized +'static> PartialEq for NonZeroPtrMut<T> { fn eq(&self, other: &Self) -> bool { self.ptr() == other.ptr() } } impl<T:?Sized +'static> Eq for NonZeroPtrMut<T> {} pub struct Arc<T:?Sized +'static> { p: NonZeroPtrMut<ArcInner<T>>, } /// An Arc that is known to be uniquely owned /// /// This lets us build arcs that we can mutate before /// freezing, without needing to change the allocation pub struct UniqueArc<T:?Sized +'static>(Arc<T>); impl<T> UniqueArc<T> { #[inline] /// Construct a new UniqueArc pub fn new(data: T) -> Self { UniqueArc(Arc::new(data)) } #[inline] /// Convert to a shareable Arc<T> once we're done using it pub fn shareable(self) -> Arc<T> { self.0 } } impl<T> Deref for UniqueArc<T> { type Target = T; fn deref(&self) -> &T { &*self.0 } } impl<T> DerefMut for UniqueArc<T> { fn deref_mut(&mut self) -> &mut T { // We know this to be uniquely owned unsafe { &mut (*self.0.ptr()).data } } } unsafe impl<T:?Sized + Sync + Send> Send for Arc<T> {} unsafe impl<T:?Sized + Sync + Send> Sync for Arc<T> {} struct ArcInner<T:?Sized> { count: atomic::AtomicUsize, data: T, } unsafe impl<T:?Sized + Sync + Send> Send for ArcInner<T> {} unsafe impl<T:?Sized + Sync + Send> Sync for ArcInner<T> {} impl<T> Arc<T> { #[inline] pub fn new(data: T) -> Self { let x = Box::new(ArcInner { count: atomic::AtomicUsize::new(1), data: data, }); Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) } } pub fn into_raw(this: Self) -> *const T { let ptr = unsafe { &((*this.ptr()).data) as *const _ }; mem::forget(this); ptr } pub unsafe fn from_raw(ptr: *const T) -> Self { // To find the corresponding pointer to the `ArcInner` we need // to subtract the offset of the `data` field from the pointer. let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data)); Arc { p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>), } } } impl<T:?Sized> Arc<T> { #[inline] fn inner(&self) -> &ArcInner<T> { // This unsafety is ok because while this arc is alive we're guaranteed // that the inner pointer is valid. Furthermore, we know that the // `ArcInner` structure itself is `Sync` because the inner data is // `Sync` as well, so we're ok loaning out an immutable pointer to these // contents. unsafe { &*self.ptr() } } // Non-inlined part of `drop`. Just invokes the destructor. #[inline(never)] unsafe fn drop_slow(&mut self) { let _ = Box::from_raw(self.ptr()); } #[inline] pub fn ptr_eq(this: &Self, other: &Self) -> bool { this.ptr() == other.ptr() } fn ptr(&self) -> *mut ArcInner<T> { self.p.ptr() } } impl<T:?Sized> Clone for Arc<T> { #[inline] fn clone(&self) -> Self { // Using a relaxed ordering is alright here, as knowledge of the // original reference prevents other threads from erroneously deleting // the object. // // As explained in the [Boost documentation][1], Increasing the // reference counter can always be done with memory_order_relaxed: New // references to an object can only be formed from an existing // reference, and passing an existing reference from one thread to // another must already provide any required synchronization. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) let old_size = self.inner().count.fetch_add(1, Relaxed); // However we need to guard against massive refcounts in case someone // is `mem::forget`ing Arcs. 
If we don't do this the count can overflow // and users will use-after free. We racily saturate to `isize::MAX` on // the assumption that there aren't ~2 billion threads incrementing // the reference count at once. This branch will never be taken in // any realistic program. // // We abort because such a program is incredibly degenerate, and we // don't care to support it. if old_size > MAX_REFCOUNT { process::abort(); } Arc { p: NonZeroPtrMut::new(self.ptr()) } } } impl<T:?Sized> Deref for Arc<T> { type Target = T; #[inline] fn deref(&self) -> &T { &self.inner().data } } impl<T: Clone> Arc<T> { #[inline] pub fn make_mut(this: &mut Self) -> &mut T { if!this.is_unique() { // Another pointer exists; clone *this = Arc::new((**this).clone()); } unsafe { // This unsafety is ok because we're guaranteed that the pointer // returned is the *only* pointer that will ever be returned to T. Our // reference count is guaranteed to be 1 at this point, and we required // the Arc itself to be `mut`, so we're returning the only possible // reference to the inner data. &mut (*this.ptr()).data } } } impl<T:?Sized> Arc<T> { #[inline] pub fn get_mut(this: &mut Self) -> Option<&mut T> { if this.is_unique() { unsafe { // See make_mut() for documentation of the threadsafety here. Some(&mut (*this.ptr()).data) } } else { None } } #[inline] fn is_unique(&self) -> bool { // We can use Relaxed here, but the justification is a bit subtle. // // The reason to use Acquire would be to synchronize with other threads // that are modifying the refcount with Release, i.e. to ensure that // their writes to memory guarded by this refcount are flushed. However, // we know that threads only modify the contents of the Arc when they // observe the refcount to be 1, and no other thread could observe that // because we're holding one strong reference here. self.inner().count.load(Relaxed) == 1 } } impl<T:?Sized> Drop for Arc<T> { #[inline] fn drop(&mut self) { // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. if self.inner().count.fetch_sub(1, Release)!= 1 { return; } // FIXME(bholley): Use the updated comment when [2] is merged. // // This load is needed to prevent reordering of use of the data and // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` load. This // means that use of the data happens before decreasing the reference // count, which happens before this load, which happens before the // deletion of the data. // // As explained in the [Boost documentation][1], // // > It is important to enforce any possible access to the object in one // > thread (through an existing reference) to *happen before* deleting // > the object in a different thread. This is achieved by a "release" // > operation after dropping a reference (any access to the object // > through this reference must obviously happened before), and an // > "acquire" operation before deleting the object. 
// // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) // [2]: https://github.com/rust-lang/rust/pull/41714 self.inner().count.load(Acquire); unsafe { self.drop_slow(); } } } impl<T:?Sized + PartialEq> PartialEq for Arc<T> { fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) } fn ne(&self, other: &Arc<T>) -> bool { *(*self)!= *(*other) } } impl<T:?Sized + PartialOrd> PartialOrd for Arc<T> { fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { (**self).partial_cmp(&**other) } fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) } fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) } fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) } fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) } } impl<T:?Sized + Ord> Ord for Arc<T> { fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) } } impl<T:?Sized + Eq> Eq for Arc<T> {} impl<T:?Sized + fmt::Display> fmt::Display for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&**self, f) } } impl<T:?Sized + fmt::Debug> fmt::Debug for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<T:?Sized> fmt::Pointer for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T: Default> Default for Arc<T> { fn default() -> Arc<T> { Arc::new(Default::default()) } } impl<T:?Sized + Hash> Hash for Arc<T> { fn hash<H: Hasher>(&self, state: &mut H) { (**self).hash(state) } } impl<T> From<T> for Arc<T> { fn from(t: T) -> Self { Arc::new(t) } } impl<T:?Sized> borrow::Borrow<T> for Arc<T> { fn borrow(&self) -> &T { &**self } } impl<T:?Sized> AsRef<T> for Arc<T> { fn as_ref(&self) -> &T { &**self } } // This is what the HeapSize crate does for regular arc, but is questionably // sound. See https://github.com/servo/heapsize/issues/37 #[cfg(feature = "servo")] impl<T: HeapSizeOf> HeapSizeOf for Arc<T> { fn heap_size_of_children(&self) -> usize { (**self).heap_size_of_children() } } #[cfg(feature = "servo")] impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> { fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error> where D: ::serde::de::Deserializer<'de>, { T::deserialize(deserializer).map(Arc::new) } } #[cfg(feature = "servo")] impl<T: Serialize> Serialize for Arc<T> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ::serde::ser::Serializer, { (**self).serialize(serializer) } } /// Structure to allow Arc-managing some fixed-sized data and a variably-sized /// slice in a single allocation. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderSlice<H, T:?Sized> { /// The fixed-sized data. pub header: H, /// The dynamically-sized data. pub slice: T, } #[inline(always)] fn divide_rounding_up(dividend: usize, divisor: usize) -> usize { (dividend + divisor - 1) / divisor } impl<H, T> Arc<HeaderSlice<H, [T]>> { /// Creates an Arc for a HeaderSlice using the given header struct and /// iterator to generate the slice. The resulting Arc will be fat. #[inline] pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self where I: Iterator<Item=T> + ExactSizeIterator { use ::std::mem::size_of; assert!(size_of::<T>()!= 0, "Need to think about ZST"); // Compute the required size for the allocation. let num_items = items.len(); let size = { // First, determine the alignment of a hypothetical pointer to a // HeaderSlice. 
let fake_slice_ptr_align: usize = mem::align_of::<ArcInner<HeaderSlice<H, [T; 1]>>>(); // Next, synthesize a totally garbage (but properly aligned) pointer // to a sequence of T. let fake_slice_ptr = fake_slice_ptr_align as *const T; // Convert that sequence to a fat pointer. The address component of // the fat pointer will be garbage, but the length will be correct. let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) }; // Pretend the garbage address points to our allocation target (with // a trailing sequence of T), rather than just a sequence of T. let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>; let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr }; // Use size_of_val, which will combine static information about the // type with the length from the fat pointer. The garbage address // will not be used. mem::size_of_val(fake_ref) }; let ptr: *mut ArcInner<HeaderSlice<H, [T]>>; unsafe { // Allocate the buffer. We use Vec because the underlying allocation // machinery isn't available in stable Rust. // // To avoid alignment issues, we allocate words rather than bytes, // rounding up to the nearest word size. let buffer = if mem::align_of::<T>() <= mem::align_of::<usize>() { Self::allocate_buffer::<usize>(size) } else if mem::align_of::<T>() <= mem::align_of::<u64>() { // On 32-bit platforms <T> may have 8 byte alignment while usize has 4 byte aligment. // Use u64 to avoid over-alignment. // This branch will compile away in optimized builds. Self::allocate_buffer::<u64>(size) } else { panic!("Over-aligned type not handled"); }; // Synthesize the fat pointer. We do this by claiming we have a direct // pointer to a [T], and then changing the type of the borrow. The key // point here is that the length portion of the fat pointer applies // only to the number of elements in the dynamically-sized portion of // the type, so the value will be the same whether it points to a [T] // or something else with a [T] as its last member. let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items); ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>; // Write the data. // // Note that any panics here (i.e. from the iterator) are safe, since // we'll just leak the uninitialized memory. ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1)); ptr::write(&mut ((*ptr).data.header), header); let mut current: *mut T = &mut (*ptr).data.slice[0]; for _ in 0..num_items { ptr::write(current, items.next().expect("ExactSizeIterator over-reported length")); current = current.offset(1); } assert!(items.next().is_none(), "ExactSizeIterator under-reported length"); // We should have consumed the buffer exactly. debug_assert!(current as *mut u8 == buffer.offset(size as isize)); } // Return the fat Arc. assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat"); Arc { p: NonZeroPtrMut::new(ptr) } } #[inline] unsafe fn allocate_buffer<W>(size: usize) -> *mut u8 { let words_to_allocate = divide_rounding_up(size, mem::size_of::<W>()); let mut vec = Vec::<W>::with_capacity(words_to_allocate); vec.set_len(words_to_allocate); Box::into_raw(vec.into_boxed_slice()) as *mut W as *mut u8 } } /// Header data with an inline length. Consumers that use HeaderWithLength as the /// Header type in HeaderSlice can take advantage of ThinArc. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderWithLength<H> { /// The fixed-sized data. pub header: H, /// The slice length. 
length: usize, } impl<H> HeaderWithLength<H> { /// Creates a new HeaderWithLength. pub fn new(header: H, length: usize) -> Self { HeaderWithLength { header: header, length: length, } } } type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>; pub struct ThinArc<H:'static, T:'static> { ptr: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>, } unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {} unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {} // Synthesize a fat pointer from a thin pointer. // // See the comment around the analogous operation in from_header_and_iter. fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>) -> *mut ArcInner<HeaderSliceWithLength<H, [T]>> { let len = unsafe { (*thin).data.header.length }; let fake_slice: *mut [T] = unsafe { slice::from_raw_parts_mut(thin as *mut T, len) }; fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>> } impl<H:'static, T:'static> ThinArc<H, T> { /// Temporarily converts |self| into a bonafide Arc and exposes it to the /// provided callback. The refcount is not modified. #[inline(always)] pub fn with_arc<F, U>(&self, f: F) -> U where F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U { // Synthesize transient Arc, which never touches the refcount of the ArcInner. let transient = NoDrop::new(Arc { p: NonZeroPtrMut::new(thin_to_thick(self.ptr)) }); // Expose the transient Arc to the callback, which may clone it if it wants. let result = f(&transient); // Forget the transient Arc to leave the refcount untouched. mem::forget(transient); // Forward the result. result } } impl<H, T> Deref for ThinArc<H, T> { type Target = HeaderSliceWithLength<H, [T]>; fn deref(&self) -> &Self::Target { unsafe { &(*thin_to_thick(self.ptr)).data } } } impl<H:'static, T:'static> Clone for ThinArc<H, T
fmt
identifier_name
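The `Drop` impl repeated in these entries uses a Release `fetch_sub` followed by an Acquire operation before freeing. Below is a minimal sketch of the same handshake with a plain `Box` as the payload; it uses an explicit `fence(Acquire)`, as `std`'s own `Arc` does, which serves the same purpose as the Acquire `load` in the entries. This illustrates the ordering argument and is not the crate's code.

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering::{Acquire, Release}};

struct Shared {
    count: AtomicUsize,
    data: String,
}

/// Called by each owner when it goes away; returns true if the caller
/// was the last owner and freed the allocation.
unsafe fn release(ptr: *mut Shared) -> bool {
    // Release: all prior uses of `data` by this thread happen-before
    // the decrement becomes visible to other threads.
    if (*ptr).count.fetch_sub(1, Release) != 1 {
        return false;
    }

    // Acquire: pairs with the Release decrements of the other owners, so
    // their last uses of `data` happen-before the deallocation below.
    fence(Acquire);
    drop(Box::from_raw(ptr));
    true
}

fn main() {
    let ptr = Box::into_raw(Box::new(Shared {
        count: AtomicUsize::new(2),
        data: "payload".into(),
    }));

    unsafe {
        assert!(!release(ptr)); // first owner leaves; count drops to 1
        assert!(release(ptr));  // last owner frees the allocation
    }
}
```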
lib.rs
from the address of the struct. macro_rules! offset_of { ($container:path, $field:ident) => {{ // Make sure the field actually exists. This line ensures that a compile-time error is // generated if $field is accessed through a Deref impl. let $container { $field: _, .. }; // Create an (invalid) instance of the container and calculate the offset to its // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to // be nullptr deref. let invalid: $container = ::std::mem::uninitialized(); let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize; // Do not run destructors on the made up invalid instance. ::std::mem::forget(invalid); offset as isize }}; } /// A soft limit on the amount of references that may be made to an `Arc`. /// /// Going above this limit will abort your program (although not /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// Wrapper type for pointers to get the non-zero optimization. When /// NonZero/Shared/Unique are stabilized, we should just use Shared /// here to get the same effect. Gankro is working on this in [1]. /// /// It's unfortunate that this needs to infect all the caller types /// with 'static. It would be nice to just use a &() and a PhantomData<T> /// instead, but then the compiler can't determine whether the &() should /// be thin or fat (which depends on whether or not T is sized). Given /// that this is all a temporary hack, this restriction is fine for now. /// /// [1] https://github.com/rust-lang/rust/issues/27730 pub struct NonZeroPtrMut<T: ?Sized + 'static>(&'static mut T); impl<T: ?Sized> NonZeroPtrMut<T> { pub fn new(ptr: *mut T) -> Self { assert!(!(ptr as *mut u8).is_null()); NonZeroPtrMut(unsafe { mem::transmute(ptr) }) } pub fn ptr(&self) -> *mut T { self.0 as *const T as *mut T } } impl<T: ?Sized + 'static> Clone for NonZeroPtrMut<T> { fn clone(&self) -> Self { NonZeroPtrMut::new(self.ptr()) } } impl<T: ?Sized + 'static> fmt::Pointer for NonZeroPtrMut<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T: ?Sized + 'static> fmt::Debug for NonZeroPtrMut<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Pointer>::fmt(self, f) } } impl<T: ?Sized + 'static> PartialEq for NonZeroPtrMut<T> { fn eq(&self, other: &Self) -> bool
} impl<T: ?Sized + 'static> Eq for NonZeroPtrMut<T> {} pub struct Arc<T: ?Sized + 'static> { p: NonZeroPtrMut<ArcInner<T>>, } /// An Arc that is known to be uniquely owned /// /// This lets us build arcs that we can mutate before /// freezing, without needing to change the allocation pub struct UniqueArc<T: ?Sized + 'static>(Arc<T>); impl<T> UniqueArc<T> { #[inline] /// Construct a new UniqueArc pub fn new(data: T) -> Self { UniqueArc(Arc::new(data)) } #[inline] /// Convert to a shareable Arc<T> once we're done using it pub fn shareable(self) -> Arc<T> { self.0 } } impl<T> Deref for UniqueArc<T> { type Target = T; fn deref(&self) -> &T { &*self.0 } } impl<T> DerefMut for UniqueArc<T> { fn deref_mut(&mut self) -> &mut T { // We know this to be uniquely owned unsafe { &mut (*self.0.ptr()).data } } } unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {} unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {} struct ArcInner<T: ?Sized> { count: atomic::AtomicUsize, data: T, } unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {} unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {} impl<T> Arc<T> { #[inline] pub fn new(data: T) -> Self { let x = Box::new(ArcInner { count: atomic::AtomicUsize::new(1), data: data, }); Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) } } pub fn into_raw(this: Self) -> *const T { let ptr = unsafe { &((*this.ptr()).data) as *const _ }; mem::forget(this); ptr } pub unsafe fn from_raw(ptr: *const T) -> Self { // To find the corresponding pointer to the `ArcInner` we need // to subtract the offset of the `data` field from the pointer. let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data)); Arc { p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>), } } } impl<T: ?Sized> Arc<T> { #[inline] fn inner(&self) -> &ArcInner<T> { // This unsafety is ok because while this arc is alive we're guaranteed // that the inner pointer is valid. Furthermore, we know that the // `ArcInner` structure itself is `Sync` because the inner data is // `Sync` as well, so we're ok loaning out an immutable pointer to these // contents. unsafe { &*self.ptr() } } // Non-inlined part of `drop`. Just invokes the destructor. #[inline(never)] unsafe fn drop_slow(&mut self) { let _ = Box::from_raw(self.ptr()); } #[inline] pub fn ptr_eq(this: &Self, other: &Self) -> bool { this.ptr() == other.ptr() } fn ptr(&self) -> *mut ArcInner<T> { self.p.ptr() } } impl<T: ?Sized> Clone for Arc<T> { #[inline] fn clone(&self) -> Self { // Using a relaxed ordering is alright here, as knowledge of the // original reference prevents other threads from erroneously deleting // the object. // // As explained in the [Boost documentation][1], Increasing the // reference counter can always be done with memory_order_relaxed: New // references to an object can only be formed from an existing // reference, and passing an existing reference from one thread to // another must already provide any required synchronization. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) let old_size = self.inner().count.fetch_add(1, Relaxed); // However we need to guard against massive refcounts in case someone // is `mem::forget`ing Arcs. If we don't do this the count can overflow // and users will use-after free. We racily saturate to `isize::MAX` on // the assumption that there aren't ~2 billion threads incrementing // the reference count at once. This branch will never be taken in // any realistic program. // // We abort because such a program is incredibly degenerate, and we // don't care to support it. 
if old_size > MAX_REFCOUNT { process::abort(); } Arc { p: NonZeroPtrMut::new(self.ptr()) } } } impl<T: ?Sized> Deref for Arc<T> { type Target = T; #[inline] fn deref(&self) -> &T { &self.inner().data } } impl<T: Clone> Arc<T> { #[inline] pub fn make_mut(this: &mut Self) -> &mut T { if !this.is_unique() { // Another pointer exists; clone *this = Arc::new((**this).clone()); } unsafe { // This unsafety is ok because we're guaranteed that the pointer // returned is the *only* pointer that will ever be returned to T. Our // reference count is guaranteed to be 1 at this point, and we required // the Arc itself to be `mut`, so we're returning the only possible // reference to the inner data. &mut (*this.ptr()).data } } } impl<T: ?Sized> Arc<T> { #[inline] pub fn get_mut(this: &mut Self) -> Option<&mut T> { if this.is_unique() { unsafe { // See make_mut() for documentation of the threadsafety here. Some(&mut (*this.ptr()).data) } } else { None } } #[inline] fn is_unique(&self) -> bool { // We can use Relaxed here, but the justification is a bit subtle. // // The reason to use Acquire would be to synchronize with other threads // that are modifying the refcount with Release, i.e. to ensure that // their writes to memory guarded by this refcount are flushed. However, // we know that threads only modify the contents of the Arc when they // observe the refcount to be 1, and no other thread could observe that // because we're holding one strong reference here. self.inner().count.load(Relaxed) == 1 } } impl<T: ?Sized> Drop for Arc<T> { #[inline] fn drop(&mut self) { // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. if self.inner().count.fetch_sub(1, Release) != 1 { return; } // FIXME(bholley): Use the updated comment when [2] is merged. // // This load is needed to prevent reordering of use of the data and // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` load. This // means that use of the data happens before decreasing the reference // count, which happens before this load, which happens before the // deletion of the data. // // As explained in the [Boost documentation][1], // // > It is important to enforce any possible access to the object in one // > thread (through an existing reference) to *happen before* deleting // > the object in a different thread. This is achieved by a "release" // > operation after dropping a reference (any access to the object // > through this reference must obviously happened before), and an // > "acquire" operation before deleting the object. 
// // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) // [2]: https://github.com/rust-lang/rust/pull/41714 self.inner().count.load(Acquire); unsafe { self.drop_slow(); } } } impl<T: ?Sized + PartialEq> PartialEq for Arc<T> { fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) } fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) } } impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> { fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { (**self).partial_cmp(&**other) } fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) } fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) } fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) } fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) } } impl<T: ?Sized + Ord> Ord for Arc<T> { fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) } } impl<T: ?Sized + Eq> Eq for Arc<T> {} impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&**self, f) } } impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<T: ?Sized> fmt::Pointer for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T: Default> Default for Arc<T> { fn default() -> Arc<T> { Arc::new(Default::default()) } } impl<T: ?Sized + Hash> Hash for Arc<T> { fn hash<H: Hasher>(&self, state: &mut H) { (**self).hash(state) } } impl<T> From<T> for Arc<T> { fn from(t: T) -> Self { Arc::new(t) } } impl<T: ?Sized> borrow::Borrow<T> for Arc<T> { fn borrow(&self) -> &T { &**self } } impl<T: ?Sized> AsRef<T> for Arc<T> { fn as_ref(&self) -> &T { &**self } } // This is what the HeapSize crate does for regular arc, but is questionably // sound. See https://github.com/servo/heapsize/issues/37 #[cfg(feature = "servo")] impl<T: HeapSizeOf> HeapSizeOf for Arc<T> { fn heap_size_of_children(&self) -> usize { (**self).heap_size_of_children() } } #[cfg(feature = "servo")] impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> { fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error> where D: ::serde::de::Deserializer<'de>, { T::deserialize(deserializer).map(Arc::new) } } #[cfg(feature = "servo")] impl<T: Serialize> Serialize for Arc<T> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ::serde::ser::Serializer, { (**self).serialize(serializer) } } /// Structure to allow Arc-managing some fixed-sized data and a variably-sized /// slice in a single allocation. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderSlice<H, T: ?Sized> { /// The fixed-sized data. pub header: H, /// The dynamically-sized data. pub slice: T, } #[inline(always)] fn divide_rounding_up(dividend: usize, divisor: usize) -> usize { (dividend + divisor - 1) / divisor } impl<H, T> Arc<HeaderSlice<H, [T]>> { /// Creates an Arc for a HeaderSlice using the given header struct and /// iterator to generate the slice. The resulting Arc will be fat. #[inline] pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self where I: Iterator<Item=T> + ExactSizeIterator { use ::std::mem::size_of; assert!(size_of::<T>() != 0, "Need to think about ZST"); // Compute the required size for the allocation. let num_items = items.len(); let size = { // First, determine the alignment of a hypothetical pointer to a // HeaderSlice. 
let fake_slice_ptr_align: usize = mem::align_of::<ArcInner<HeaderSlice<H, [T; 1]>>>(); // Next, synthesize a totally garbage (but properly aligned) pointer // to a sequence of T. let fake_slice_ptr = fake_slice_ptr_align as *const T; // Convert that sequence to a fat pointer. The address component of // the fat pointer will be garbage, but the length will be correct. let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) }; // Pretend the garbage address points to our allocation target (with // a trailing sequence of T), rather than just a sequence of T. let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>; let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr }; // Use size_of_val, which will combine static information about the // type with the length from the fat pointer. The garbage address // will not be used. mem::size_of_val(fake_ref) }; let ptr: *mut ArcInner<HeaderSlice<H, [T]>>; unsafe { // Allocate the buffer. We use Vec because the underlying allocation // machinery isn't available in stable Rust. // // To avoid alignment issues, we allocate words rather than bytes, // rounding up to the nearest word size. let buffer = if mem::align_of::<T>() <= mem::align_of::<usize>() { Self::allocate_buffer::<usize>(size) } else if mem::align_of::<T>() <= mem::align_of::<u64>() { // On 32-bit platforms <T> may have 8 byte alignment while usize has 4 byte alignment. // Use u64 to avoid over-alignment. // This branch will compile away in optimized builds. Self::allocate_buffer::<u64>(size) } else { panic!("Over-aligned type not handled"); }; // Synthesize the fat pointer. We do this by claiming we have a direct // pointer to a [T], and then changing the type of the borrow. The key // point here is that the length portion of the fat pointer applies // only to the number of elements in the dynamically-sized portion of // the type, so the value will be the same whether it points to a [T] // or something else with a [T] as its last member. let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items); ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>; // Write the data. // // Note that any panics here (i.e. from the iterator) are safe, since // we'll just leak the uninitialized memory. ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1)); ptr::write(&mut ((*ptr).data.header), header); let mut current: *mut T = &mut (*ptr).data.slice[0]; for _ in 0..num_items { ptr::write(current, items.next().expect("ExactSizeIterator over-reported length")); current = current.offset(1); } assert!(items.next().is_none(), "ExactSizeIterator under-reported length"); // We should have consumed the buffer exactly. debug_assert!(current as *mut u8 == buffer.offset(size as isize)); } // Return the fat Arc. assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat"); Arc { p: NonZeroPtrMut::new(ptr) } } #[inline] unsafe fn allocate_buffer<W>(size: usize) -> *mut u8 { let words_to_allocate = divide_rounding_up(size, mem::size_of::<W>()); let mut vec = Vec::<W>::with_capacity(words_to_allocate); vec.set_len(words_to_allocate); Box::into_raw(vec.into_boxed_slice()) as *mut W as *mut u8 } } /// Header data with an inline length. Consumers that use HeaderWithLength as the /// Header type in HeaderSlice can take advantage of ThinArc. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderWithLength<H> { /// The fixed-sized data. pub header: H, /// The slice length. 
length: usize, } impl<H> HeaderWithLength<H> { /// Creates a new HeaderWithLength. pub fn new(header: H, length: usize) -> Self { HeaderWithLength { header: header, length: length, } } } type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>; pub struct ThinArc<H: 'static, T: 'static> { ptr: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>, } unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {} unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {} // Synthesize a fat pointer from a thin pointer. // // See the comment around the analogous operation in from_header_and_iter. fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>) -> *mut ArcInner<HeaderSliceWithLength<H, [T]>> { let len = unsafe { (*thin).data.header.length }; let fake_slice: *mut [T] = unsafe { slice::from_raw_parts_mut(thin as *mut T, len) }; fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>> } impl<H: 'static, T: 'static> ThinArc<H, T> { /// Temporarily converts |self| into a bonafide Arc and exposes it to the /// provided callback. The refcount is not modified. #[inline(always)] pub fn with_arc<F, U>(&self, f: F) -> U where F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U { // Synthesize transient Arc, which never touches the refcount of the ArcInner. let transient = NoDrop::new(Arc { p: NonZeroPtrMut::new(thin_to_thick(self.ptr)) }); // Expose the transient Arc to the callback, which may clone it if it wants. let result = f(&transient); // Forget the transient Arc to leave the refcount untouched. mem::forget(transient); // Forward the result. result } } impl<H, T> Deref for ThinArc<H, T> { type Target = HeaderSliceWithLength<H, [T]>; fn deref(&self) -> &Self::Target { unsafe { &(*thin_to_thick(self.ptr)).data } } } impl<H: 'static, T: 'static> Clone for ThinArc<H, T
{ self.ptr() == other.ptr() }
identifier_body
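The offset_of! macro in the record above computes a field offset by materializing an invalid instance with mem::uninitialized(), which has since been deprecated. A minimal sketch of the same computation using MaybeUninit and ptr::addr_of! (the struct and field names here are illustrative, not taken from the record):

    use std::mem::MaybeUninit;
    use std::ptr::addr_of;

    #[allow(dead_code)]
    struct Inner {
        count: usize,
        data: u32,
    }

    // Compute the byte offset of `data` inside `Inner` without ever creating
    // an instance: MaybeUninit provides raw storage, and addr_of! takes the
    // field address without forming a reference to uninitialized memory.
    fn offset_of_data() -> usize {
        let uninit = MaybeUninit::<Inner>::uninit();
        let base = uninit.as_ptr();
        let field = unsafe { addr_of!((*base).data) };
        field as usize - base as usize
    }

    fn main() {
        println!("offset of data: {}", offset_of_data());
    }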
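The clone and drop comments in this record carry the core reasoning: incrementing the refcount can be Relaxed, while the decrement is Release followed by an Acquire load before deallocation. A self-contained toy counter that mirrors those orderings, with the deallocation replaced by a print so the sketch stays safe to run:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Toy refcount mirroring the orderings described above.
    struct ToyCount(AtomicUsize);

    impl ToyCount {
        fn clone_ref(&self) {
            // Relaxed suffices: a new reference can only be made from an
            // existing one, which already provides the synchronization.
            self.0.fetch_add(1, Ordering::Relaxed);
        }

        fn drop_ref(&self) {
            // Release publishes all writes made through this reference.
            if self.0.fetch_sub(1, Ordering::Release) != 1 {
                return;
            }
            // Acquire pairs with the Release above, so the last owner sees
            // every earlier write before "deleting" the data.
            self.0.load(Ordering::Acquire);
            println!("last reference dropped; safe to free");
        }
    }

    fn main() {
        let c = ToyCount(AtomicUsize::new(1));
        c.clone_ref();
        c.drop_ref();
        c.drop_ref(); // prints: the count just hit zero
    }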
manual_migration.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This example shows how to provide database data migration manually. //! //! The main logic is described in the `manual_migration` and `migrate_wallets` functions. //! //! The main points of this example are: //! //! - We manually create a `Fork` from the DB, as well as `Migration` and `Prefixed` access //! to the data. //! - We manually apply the resulting `Patch` to the DB. //! //! For the description of the common migration scenario, see the `migration` module docs. use exonum_merkledb::{ access::Prefixed, migration::Migration, Database, Fork, ObjectHash, ReadonlyFork, }; use std::sync::Arc; mod migration; use crate::migration::{perform_migration, v1, v2}; /// Provides migration of wallets with schema. /// /// - `Wallet.public_key` field is removed. /// - `Wallet.history_hash` field is added. /// - Wallets and wallet history belonging to the users named "Eve" are dropped. fn migrate_wallets(new_data: Migration<&Fork>, old_data: Prefixed<ReadonlyFork<'_>>) { let old_schema = v1::Schema::new(old_data); let mut new_schema = v2::Schema::new(new_data.clone()); // Migrate wallets. for (i, (public_key, wallet)) in old_schema.wallets.iter().enumerate() { if wallet.username == "Eve" { // We don't like Eves 'round these parts. Remove her transaction history // and don't migrate the wallet. new_data.create_tombstone(("histories", &public_key)); } else { // Merkelize the wallet history. let mut history = new_schema.histories.get(&public_key); history.extend(&old_schema.histories.get(&public_key)); let new_wallet = v2::Wallet { username: wallet.username, balance: wallet.balance, history_hash: history.object_hash(), }; new_schema.wallets.put(&public_key, new_wallet); } if i % 1_000 == 999 { println!("Processed {} wallets", i + 1); } } } fn
(db: Arc<dyn Database>) { // Create fork to apply changes to it. let fork = db.fork(); { let new_data = Migration::new("test", &fork); let mut new_schema = v2::Schema::new(new_data.clone()); let old_data = Prefixed::new("test", fork.readonly()); let old_schema = v1::Schema::new(old_data); // Move `ticker` and `divisibility` to `config`. let config = v2::Config { ticker: old_schema.ticker.get().unwrap(), divisibility: old_schema.divisibility.get().unwrap_or(0), }; new_schema.config.set(config); // Mark these two indexes for removal. new_data.create_tombstone("ticker"); new_data.create_tombstone("divisibility"); } let new_data = Migration::new("test", &fork); let old_data = Prefixed::new("test", fork.readonly()); migrate_wallets(new_data, old_data); // Merge patch with migrated data. db.merge(fork.into_patch()).unwrap(); } fn main() { perform_migration(manual_migration); }
manual_migration
identifier_name
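migrate_wallets copies each v1 wallet into the v2 schema, merkelizes its history, and drops anything owned by "Eve". A plain-HashMap sketch of that copy-or-skip shape (the types, the hash function, and all names are illustrative stand-ins for the exonum_merkledb accessors):

    use std::collections::HashMap;

    struct WalletV1 { username: String, balance: u64 }
    struct WalletV2 { username: String, balance: u64, history_hash: u64 }

    // Illustrative stand-in for the merkelized history hash.
    fn hash_history(history: &[u64]) -> u64 {
        history.iter().fold(0u64, |acc, x| acc.wrapping_mul(31).wrapping_add(*x))
    }

    fn migrate(
        old_wallets: &HashMap<u32, WalletV1>,
        old_histories: &HashMap<u32, Vec<u64>>,
    ) -> HashMap<u32, WalletV2> {
        let mut new_wallets = HashMap::new();
        for (key, wallet) in old_wallets {
            if wallet.username == "Eve" {
                // Mirrors create_tombstone: Eve's data is simply not carried over.
                continue;
            }
            let history = old_histories.get(key).cloned().unwrap_or_default();
            new_wallets.insert(*key, WalletV2 {
                username: wallet.username.clone(),
                balance: wallet.balance,
                history_hash: hash_history(&history),
            });
        }
        new_wallets
    }

    fn main() {
        let mut wallets = HashMap::new();
        wallets.insert(1, WalletV1 { username: "Alice".into(), balance: 10 });
        wallets.insert(2, WalletV1 { username: "Eve".into(), balance: 99 });
        let mut histories = HashMap::new();
        histories.insert(1, vec![1, 2, 3]);
        let migrated = migrate(&wallets, &histories);
        assert!(migrated.contains_key(&1) && !migrated.contains_key(&2));
    }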
manual_migration.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This example shows how to provide database data migration manually. //! //! The main logic is described in the `manual_migration` and `migrate_wallets` functions. //! //! The main points of this example are: //! //! - We manually create a `Fork` from the DB, as well as `Migration` and `Prefixed` access //! to the data. //! - We manually apply the resulting `Patch` to the DB. //! //! For the description of the common migration scenario, see the `migration` module docs. use exonum_merkledb::{ access::Prefixed, migration::Migration, Database, Fork, ObjectHash, ReadonlyFork, }; use std::sync::Arc; mod migration; use crate::migration::{perform_migration, v1, v2}; /// Provides migration of wallets with schema. /// /// - `Wallet.public_key` field is removed. /// - `Wallet.history_hash` field is added. /// - Wallets and wallet history belonging to the users named "Eve" are dropped. fn migrate_wallets(new_data: Migration<&Fork>, old_data: Prefixed<ReadonlyFork<'_>>)
new_schema.wallets.put(&public_key, new_wallet); } if i % 1_000 == 999 { println!("Processed {} wallets", i + 1); } } } fn manual_migration(db: Arc<dyn Database>) { // Create fork to apply changes to it. let fork = db.fork(); { let new_data = Migration::new("test", &fork); let mut new_schema = v2::Schema::new(new_data.clone()); let old_data = Prefixed::new("test", fork.readonly()); let old_schema = v1::Schema::new(old_data); // Move `ticker` and `divisibility` to `config`. let config = v2::Config { ticker: old_schema.ticker.get().unwrap(), divisibility: old_schema.divisibility.get().unwrap_or(0), }; new_schema.config.set(config); // Mark these two indexes for removal. new_data.create_tombstone("ticker"); new_data.create_tombstone("divisibility"); } let new_data = Migration::new("test", &fork); let old_data = Prefixed::new("test", fork.readonly()); migrate_wallets(new_data, old_data); // Merge patch with migrated data. db.merge(fork.into_patch()).unwrap(); } fn main() { perform_migration(manual_migration); }
{ let old_schema = v1::Schema::new(old_data); let mut new_schema = v2::Schema::new(new_data.clone()); // Migrate wallets. for (i, (public_key, wallet)) in old_schema.wallets.iter().enumerate() { if wallet.username == "Eve" { // We don't like Eves 'round these parts. Remove her transaction history // and don't migrate the wallet. new_data.create_tombstone(("histories", &public_key)); } else { // Merkelize the wallet history. let mut history = new_schema.histories.get(&public_key); history.extend(&old_schema.histories.get(&public_key)); let new_wallet = v2::Wallet { username: wallet.username, balance: wallet.balance, history_hash: history.object_hash(), };
identifier_body
manual_migration.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This example shows how to provide database data migration manually. //! //! The main logic is described in the `manual_migration` and `migrate_wallets` functions. //! //! The main points of this example are: //! //! - We manually create a `Fork` from the DB, as well as `Migration` and `Prefixed` access //! to the data. //! - We manually apply the resulting `Patch` to the DB. //! //! For the description of the common migration scenario, see the `migration` module docs. use exonum_merkledb::{ access::Prefixed, migration::Migration, Database, Fork, ObjectHash, ReadonlyFork, }; use std::sync::Arc; mod migration; use crate::migration::{perform_migration, v1, v2}; /// Provides migration of wallets with schema. /// /// - `Wallet.public_key` field is removed. /// - `Wallet.history_hash` field is added.
// Migrate wallets. for (i, (public_key, wallet)) in old_schema.wallets.iter().enumerate() { if wallet.username == "Eve" { // We don't like Eves 'round these parts. Remove her transaction history // and don't migrate the wallet. new_data.create_tombstone(("histories", &public_key)); } else { // Merkelize the wallet history. let mut history = new_schema.histories.get(&public_key); history.extend(&old_schema.histories.get(&public_key)); let new_wallet = v2::Wallet { username: wallet.username, balance: wallet.balance, history_hash: history.object_hash(), }; new_schema.wallets.put(&public_key, new_wallet); } if i % 1_000 == 999 { println!("Processed {} wallets", i + 1); } } } fn manual_migration(db: Arc<dyn Database>) { // Create fork to apply changes to it. let fork = db.fork(); { let new_data = Migration::new("test", &fork); let mut new_schema = v2::Schema::new(new_data.clone()); let old_data = Prefixed::new("test", fork.readonly()); let old_schema = v1::Schema::new(old_data); // Move `ticker` and `divisibility` to `config`. let config = v2::Config { ticker: old_schema.ticker.get().unwrap(), divisibility: old_schema.divisibility.get().unwrap_or(0), }; new_schema.config.set(config); // Mark these two indexes for removal. new_data.create_tombstone("ticker"); new_data.create_tombstone("divisibility"); } let new_data = Migration::new("test", &fork); let old_data = Prefixed::new("test", fork.readonly()); migrate_wallets(new_data, old_data); // Merge patch with migrated data. db.merge(fork.into_patch()).unwrap(); } fn main() { perform_migration(manual_migration); }
/// - Wallets and wallet history belonging to the users named "Eve" are dropped. fn migrate_wallets(new_data: Migration<&Fork>, old_data: Prefixed<ReadonlyFork<'_>>) { let old_schema = v1::Schema::new(old_data); let mut new_schema = v2::Schema::new(new_data.clone());
random_line_split
manual_migration.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This example shows how to provide database data migration manually. //! //! The main logic is described in the `manual_migration` and `migrate_wallets` functions. //! //! The main points of this example are: //! //! - We manually create a `Fork` from the DB, as well as `Migration` and `Prefixed` access //! to the data. //! - We manually apply the resulting `Patch` to the DB. //! //! For the description of the common migration scenario, see the `migration` module docs. use exonum_merkledb::{ access::Prefixed, migration::Migration, Database, Fork, ObjectHash, ReadonlyFork, }; use std::sync::Arc; mod migration; use crate::migration::{perform_migration, v1, v2}; /// Provides migration of wallets with schema. /// /// - `Wallet.public_key` field is removed. /// - `Wallet.history_hash` field is added. /// - Wallets and wallet history belonging to the users named "Eve" are dropped. fn migrate_wallets(new_data: Migration<&Fork>, old_data: Prefixed<ReadonlyFork<'_>>) { let old_schema = v1::Schema::new(old_data); let mut new_schema = v2::Schema::new(new_data.clone()); // Migrate wallets. for (i, (public_key, wallet)) in old_schema.wallets.iter().enumerate() { if wallet.username == "Eve" { // We don't like Eves 'round these parts. Remove her transaction history // and don't migrate the wallet. new_data.create_tombstone(("histories", &public_key)); } else
if i % 1_000 == 999 { println!("Processed {} wallets", i + 1); } } } fn manual_migration(db: Arc<dyn Database>) { // Create fork to apply changes to it. let fork = db.fork(); { let new_data = Migration::new("test", &fork); let mut new_schema = v2::Schema::new(new_data.clone()); let old_data = Prefixed::new("test", fork.readonly()); let old_schema = v1::Schema::new(old_data); // Move `ticker` and `divisibility` to `config`. let config = v2::Config { ticker: old_schema.ticker.get().unwrap(), divisibility: old_schema.divisibility.get().unwrap_or(0), }; new_schema.config.set(config); // Mark these two indexes for removal. new_data.create_tombstone("ticker"); new_data.create_tombstone("divisibility"); } let new_data = Migration::new("test", &fork); let old_data = Prefixed::new("test", fork.readonly()); migrate_wallets(new_data, old_data); // Merge patch with migrated data. db.merge(fork.into_patch()).unwrap(); } fn main() { perform_migration(manual_migration); }
{ // Merkelize the wallet history. let mut history = new_schema.histories.get(&public_key); history.extend(&old_schema.histories.get(&public_key)); let new_wallet = v2::Wallet { username: wallet.username, balance: wallet.balance, history_hash: history.object_hash(), }; new_schema.wallets.put(&public_key, new_wallet); }
conditional_block
mod.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use common::SourceLocationKey; use fixture_tests::Fixture; use graphql_ir::{build, Program}; use graphql_syntax::parse_executable; use graphql_test_helpers::diagnostics_to_sorted_string; use relay_test_schema::get_test_schema_with_extensions; use relay_transforms::validate_server_only_directives; use std::sync::Arc; pub fn transform_fixture(fixture: &Fixture<'_>) -> Result<String, String> { let parts: Vec<_> = fixture.content.split("%extensions%").collect(); if let [base, extensions] = parts.as_slice() { let source_location = SourceLocationKey::standalone(fixture.file_name); let ast = parse_executable(base, source_location).unwrap(); let schema = get_test_schema_with_extensions(extensions); let ir = build(&schema, &ast.definitions).unwrap(); let program = Program::from_definitions(Arc::clone(&schema), ir); validate_server_only_directives(&program) .map_err(|diagnostics| diagnostics_to_sorted_string(fixture.content, &diagnostics))?; Ok("OK".to_owned()) } else
}
{ panic!("Expected exactly one %extensions% section marker.") }
conditional_block
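transform_fixture splits the fixture on a literal "%extensions%" marker and uses a slice pattern to insist on exactly one occurrence. A runnable sketch of just that splitting step (the fixture text and names are made up):

    // Enforce exactly one "%extensions%" marker with a slice pattern, as the
    // fixture harness above does.
    fn split_fixture(content: &str) -> Result<(&str, &str), String> {
        let parts: Vec<&str> = content.split("%extensions%").collect();
        if let [base, extensions] = parts.as_slice() {
            Ok((*base, *extensions))
        } else {
            Err("Expected exactly one %extensions% section marker.".to_string())
        }
    }

    fn main() {
        let fixture = "query Q { me }\n%extensions%\ntype Foo { id: ID }";
        let (base, ext) = split_fixture(fixture).unwrap();
        println!("base: {:?}, extensions: {:?}", base.trim(), ext.trim());
    }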
mod.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */
use graphql_syntax::parse_executable; use graphql_test_helpers::diagnostics_to_sorted_string; use relay_test_schema::get_test_schema_with_extensions; use relay_transforms::validate_server_only_directives; use std::sync::Arc; pub fn transform_fixture(fixture: &Fixture<'_>) -> Result<String, String> { let parts: Vec<_> = fixture.content.split("%extensions%").collect(); if let [base, extensions] = parts.as_slice() { let source_location = SourceLocationKey::standalone(fixture.file_name); let ast = parse_executable(base, source_location).unwrap(); let schema = get_test_schema_with_extensions(extensions); let ir = build(&schema, &ast.definitions).unwrap(); let program = Program::from_definitions(Arc::clone(&schema), ir); validate_server_only_directives(&program) .map_err(|diagnostics| diagnostics_to_sorted_string(fixture.content, &diagnostics))?; Ok("OK".to_owned()) } else { panic!("Expected exactly one %extensions% section marker.") } }
use common::SourceLocationKey; use fixture_tests::Fixture; use graphql_ir::{build, Program};
random_line_split
mod.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use common::SourceLocationKey; use fixture_tests::Fixture; use graphql_ir::{build, Program}; use graphql_syntax::parse_executable; use graphql_test_helpers::diagnostics_to_sorted_string; use relay_test_schema::get_test_schema_with_extensions; use relay_transforms::validate_server_only_directives; use std::sync::Arc; pub fn transform_fixture(fixture: &Fixture<'_>) -> Result<String, String>
{ let parts: Vec<_> = fixture.content.split("%extensions%").collect(); if let [base, extensions] = parts.as_slice() { let source_location = SourceLocationKey::standalone(fixture.file_name); let ast = parse_executable(base, source_location).unwrap(); let schema = get_test_schema_with_extensions(extensions); let ir = build(&schema, &ast.definitions).unwrap(); let program = Program::from_definitions(Arc::clone(&schema), ir); validate_server_only_directives(&program) .map_err(|diagnostics| diagnostics_to_sorted_string(fixture.content, &diagnostics))?; Ok("OK".to_owned()) } else { panic!("Expected exactly one %extensions% section marker.") } }
identifier_body
mod.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use common::SourceLocationKey; use fixture_tests::Fixture; use graphql_ir::{build, Program}; use graphql_syntax::parse_executable; use graphql_test_helpers::diagnostics_to_sorted_string; use relay_test_schema::get_test_schema_with_extensions; use relay_transforms::validate_server_only_directives; use std::sync::Arc; pub fn
(fixture: &Fixture<'_>) -> Result<String, String> { let parts: Vec<_> = fixture.content.split("%extensions%").collect(); if let [base, extensions] = parts.as_slice() { let source_location = SourceLocationKey::standalone(fixture.file_name); let ast = parse_executable(base, source_location).unwrap(); let schema = get_test_schema_with_extensions(extensions); let ir = build(&schema, &ast.definitions).unwrap(); let program = Program::from_definitions(Arc::clone(&schema), ir); validate_server_only_directives(&program) .map_err(|diagnostics| diagnostics_to_sorted_string(fixture.content, &diagnostics))?; Ok("OK".to_owned()) } else { panic!("Expected exactly one %extensions% section marker.") } }
transform_fixture
identifier_name
lib.rs
#![feature(test)] extern crate time; extern crate test; #[test]
// pretty print pub fn pretty_print(e : &str) { let t = time::now(); let s = time::strftime("%Y-%m-%d %H:%M:%S", &t); if s.is_ok() { println!("{0}\t{1}", s.unwrap(), e); } else { println!("####-##-## ##:##:##\t{}", e); } } // mean of array pub fn mean(a : &[f32]) -> f32 { let mut sum = 0.0; let mut n = 0.0; for p in a.iter() { sum = sum + *p; // need to (*) dereference pointers n = n + 1.0; } let f = sum / n; return f; } // median of array pub fn median(a : &[f32]) -> f32 { let mut v = a.to_vec(); v.sort_by(|x, y| x.partial_cmp(y).unwrap()); let f = v[v.len() / 2]; return f; }
fn it_works() { }
random_line_split
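median here is taken as the middle element of a sorted copy (for even-length inputs that is the upper middle, one common convention; averaging the two middles is the other). A quick check of both statistics on a small sample:

    fn main() {
        let xs = [3.0_f32, 1.0, 2.0];
        let mean = xs.iter().sum::<f32>() / xs.len() as f32;
        let mut sorted = xs.to_vec();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
        let median = sorted[sorted.len() / 2];
        assert_eq!(mean, 2.0);
        assert_eq!(median, 2.0);
    }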
lib.rs
#![feature(test)] extern crate time; extern crate test; #[test] fn it_works() { } // pretty print pub fn pretty_print(e : &str)
// mean of array pub fn mean(a : &[f32]) -> f32 { let mut sum = 0.0; let mut n = 0.0; for p in a.iter() { sum = sum + *p; // need to (*) dereference pointers n = n + 1.0; } let f = sum / n; return f; } // median of array pub fn median(a : &[f32]) -> f32 { let mut v = a.to_vec(); v.sort_by(|x, y| x.partial_cmp(y).unwrap()); let f = v[v.len() / 2]; return f; }
{ let t = time::now(); let s = time::strftime("%Y-%m-%d %H:%M:%S", &t); if s.is_ok() { println!("{0}\t{1}", s.unwrap(), e); } else { println!("####-##-## ##:##:##\t{}", e); } }
identifier_body
lib.rs
#![feature(test)] extern crate time; extern crate test; #[test] fn
() { } // pretty print pub fn pretty_print(e : &str) { let t = time::now(); let s = time::strftime("%Y-%m-%d %H:%M:%S", &t); if s.is_ok() { println!("{0}\t{1}", s.unwrap(), e); } else { println!("####-##-## ##:##:##\t{}", e); } } // mean of array pub fn mean(a : &[f32]) -> f32 { let mut sum = 0.0; let mut n = 0.0; for p in a.iter() { sum = sum + *p; // need to (*) dereference pointers n = n + 1.0; } let f = sum / n; return f; } // median of array pub fn median(a : &[f32]) -> f32 { let mut v = a.to_vec(); v.sort_by(|x, y| x.partial_cmp(y).unwrap()); let f = v[v.len() / 2]; return f; }
it_works
identifier_name
lib.rs
#![feature(test)] extern crate time; extern crate test; #[test] fn it_works() { } // pretty print pub fn pretty_print(e : &str) { let t = time::now(); let s = time::strftime("%Y-%m-%d %H:%M:%S", &t); if s.is_ok()
else { println!("####-##-## ##:##:##\t{}", e); } } // mean of array pub fn mean(a : &[f32]) -> f32 { let mut sum = 0.0; let mut n = 0.0; for p in a.iter() { sum = sum + *p; // need to (*) dereference pointers n = n + 1.0; } let f = sum / n; return f; } // median of array pub fn median(a : &[f32]) -> f32 { let mut v = a.to_vec(); v.sort_by(|x, y| x.partial_cmp(y).unwrap()); let f = v[v.len() / 2]; return f; }
{ println!("{0}\t{1}", s.unwrap(), e); }
conditional_block
sph_echo_test.rs
extern crate sphlib; extern crate libc; use sphlib::{sph_echo, utils}; #[test] fn will_be_224_hash()
#[test] fn will_be_256_hash() { let dest = sph_echo::echo256_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("4496cd09d425999aefa75189ee7fd3c97362aa9e4ca898328002d20a4b519788", actual.to_string()); } #[test] fn will_be_384_hash() { let dest = sph_echo::echo384_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("134040763f840559b84b7a1ae5d6d64fc3659821a789cc64a7f1444c09ee7f81a54d72beee8273bae5ef18ec43aa5f34", actual.to_string()); } #[test] fn will_be_512_hash() { let dest = sph_echo::echo512_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("158f58cc79d300a9aa292515049275d051a28ab931726d0ec44bdd9faef4a702c36db9e7922fff077402236465833c5cc76af4efc352b4b44c7fa15aa0ef234e", actual.to_string()); }
{ let dest = sph_echo::echo224_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("17da087595166f733fff7cdb0bca6438f303d0e00c48b5e7a3075905", actual.to_string()); }
identifier_body
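These tests compare utils::to_hex_hash(&dest) against expected digest strings, but the helper itself is not shown in the record. Assuming it hex-encodes a digest byte buffer, an equivalent sketch is:

    // Assumed equivalent of utils::to_hex_hash: hex-encode a digest buffer.
    fn to_hex_hash(dest: &[u8]) -> String {
        dest.iter().map(|b| format!("{:02x}", b)).collect()
    }

    fn main() {
        assert_eq!(to_hex_hash(&[0x17, 0xda, 0x08]), "17da08");
    }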
sph_echo_test.rs
extern crate sphlib; extern crate libc; use sphlib::{sph_echo, utils}; #[test] fn will_be_224_hash() { let dest = sph_echo::echo224_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("17da087595166f733fff7cdb0bca6438f303d0e00c48b5e7a3075905", actual.to_string()); }
assert_eq!("4496cd09d425999aefa75189ee7fd3c97362aa9e4ca898328002d20a4b519788", actual.to_string()); } #[test] fn will_be_384_hash() { let dest = sph_echo::echo384_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("134040763f840559b84b7a1ae5d6d64fc3659821a789cc64a7f1444c09ee7f81a54d72beee8273bae5ef18ec43aa5f34", actual.to_string()); } #[test] fn will_be_512_hash() { let dest = sph_echo::echo512_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("158f58cc79d300a9aa292515049275d051a28ab931726d0ec44bdd9faef4a702c36db9e7922fff077402236465833c5cc76af4efc352b4b44c7fa15aa0ef234e", actual.to_string()); }
#[test] fn will_be_256_hash() { let dest = sph_echo::echo256_init_load_close(""); let actual = utils::to_hex_hash(&dest);
random_line_split
sph_echo_test.rs
extern crate sphlib; extern crate libc; use sphlib::{sph_echo, utils}; #[test] fn will_be_224_hash() { let dest = sph_echo::echo224_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("17da087595166f733fff7cdb0bca6438f303d0e00c48b5e7a3075905", actual.to_string()); } #[test] fn
() { let dest = sph_echo::echo256_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("4496cd09d425999aefa75189ee7fd3c97362aa9e4ca898328002d20a4b519788", actual.to_string()); } #[test] fn will_be_384_hash() { let dest = sph_echo::echo384_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("134040763f840559b84b7a1ae5d6d64fc3659821a789cc64a7f1444c09ee7f81a54d72beee8273bae5ef18ec43aa5f34", actual.to_string()); } #[test] fn will_be_512_hash() { let dest = sph_echo::echo512_init_load_close(""); let actual = utils::to_hex_hash(&dest); assert_eq!("158f58cc79d300a9aa292515049275d051a28ab931726d0ec44bdd9faef4a702c36db9e7922fff077402236465833c5cc76af4efc352b4b44c7fa15aa0ef234e", actual.to_string()); }
will_be_256_hash
identifier_name
read.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::repo::AdminRepo; use anyhow::{Error, Result}; use blobstore::Blobstore; use context::CoreContext; use skiplist::{deserialize_skiplist_index, SkiplistIndex}; use slog::{debug, info, Logger}; pub async fn read_skiplist( ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<()> { let maybe_index = get_skiplist_index(ctx, repo, logger, blobstore_key).await?; match maybe_index { Some(index) => { info!( logger, "skiplist graph has {} entries", index.indexed_node_count() ); } None => { info!(logger, "skiplist not found"); } }; Ok(()) } pub async fn get_skiplist_index(
let maybebytes = repo.repo_blobstore.get(ctx, &blobstore_key).await?; match maybebytes { Some(bytes) => { debug!( logger, "received {} bytes from blobstore", bytes.as_bytes().len() ); let bytes = bytes.into_raw_bytes(); Ok(Some(deserialize_skiplist_index(logger.clone(), bytes)?)) } None => Ok(None), } }
ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<Option<SkiplistIndex>, Error> {
random_line_split
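get_skiplist_index distinguishes "key absent" (Ok(None)) from "bytes present, deserialize them" (Ok(Some(..))). A synchronous, dependency-free sketch of that fetch-then-deserialize shape (the HashMap store and the length-based "deserialization" are stand-ins for the blobstore and deserialize_skiplist_index):

    use std::collections::HashMap;

    // Fetch optional bytes by key and "deserialize" only when present;
    // here deserialization just counts bytes.
    fn get_index(store: &HashMap<String, Vec<u8>>, key: &str) -> Result<Option<usize>, String> {
        match store.get(key) {
            Some(bytes) => Ok(Some(bytes.len())),
            None => Ok(None),
        }
    }

    fn main() {
        let mut store = HashMap::new();
        store.insert("skiplist".to_string(), vec![1, 2, 3]);
        assert_eq!(get_index(&store, "skiplist"), Ok(Some(3)));
        assert_eq!(get_index(&store, "missing"), Ok(None));
    }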
read.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::repo::AdminRepo; use anyhow::{Error, Result}; use blobstore::Blobstore; use context::CoreContext; use skiplist::{deserialize_skiplist_index, SkiplistIndex}; use slog::{debug, info, Logger}; pub async fn read_skiplist( ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<()> { let maybe_index = get_skiplist_index(ctx, repo, logger, blobstore_key).await?; match maybe_index { Some(index) => { info!( logger, "skiplist graph has {} entries", index.indexed_node_count() ); } None => { info!(logger, "skiplist not found"); } }; Ok(()) } pub async fn get_skiplist_index( ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<Option<SkiplistIndex>, Error>
{ let maybebytes = repo.repo_blobstore.get(ctx, &blobstore_key).await?; match maybebytes { Some(bytes) => { debug!( logger, "received {} bytes from blobstore", bytes.as_bytes().len() ); let bytes = bytes.into_raw_bytes(); Ok(Some(deserialize_skiplist_index(logger.clone(), bytes)?)) } None => Ok(None), } }
identifier_body
read.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::repo::AdminRepo; use anyhow::{Error, Result}; use blobstore::Blobstore; use context::CoreContext; use skiplist::{deserialize_skiplist_index, SkiplistIndex}; use slog::{debug, info, Logger}; pub async fn read_skiplist( ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<()> { let maybe_index = get_skiplist_index(ctx, repo, logger, blobstore_key).await?; match maybe_index { Some(index) => { info!( logger, "skiplist graph has {} entries", index.indexed_node_count() ); } None => { info!(logger, "skiplist not found"); } }; Ok(()) } pub async fn
( ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<Option<SkiplistIndex>, Error> { let maybebytes = repo.repo_blobstore.get(ctx, &blobstore_key).await?; match maybebytes { Some(bytes) => { debug!( logger, "received {} bytes from blobstore", bytes.as_bytes().len() ); let bytes = bytes.into_raw_bytes(); Ok(Some(deserialize_skiplist_index(logger.clone(), bytes)?)) } None => Ok(None), } }
get_skiplist_index
identifier_name
read.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::repo::AdminRepo; use anyhow::{Error, Result}; use blobstore::Blobstore; use context::CoreContext; use skiplist::{deserialize_skiplist_index, SkiplistIndex}; use slog::{debug, info, Logger}; pub async fn read_skiplist( ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<()> { let maybe_index = get_skiplist_index(ctx, repo, logger, blobstore_key).await?; match maybe_index { Some(index) => { info!( logger, "skiplist graph has {} entries", index.indexed_node_count() ); } None =>
}; Ok(()) } pub async fn get_skiplist_index( ctx: &CoreContext, repo: &AdminRepo, logger: &Logger, blobstore_key: String, ) -> Result<Option<SkiplistIndex>, Error> { let maybebytes = repo.repo_blobstore.get(ctx, &blobstore_key).await?; match maybebytes { Some(bytes) => { debug!( logger, "received {} bytes from blobstore", bytes.as_bytes().len() ); let bytes = bytes.into_raw_bytes(); Ok(Some(deserialize_skiplist_index(logger.clone(), bytes)?)) } None => Ok(None), } }
{ info!(logger, "skiplist not found"); }
conditional_block
frame.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use msg::constellation_msg::{FrameId, PipelineId}; use pipeline::Pipeline; use script_traits::LoadData; use std::collections::HashMap; use std::iter::once; use std::mem::replace; use std::time::Instant; /// A frame in the frame tree. /// Each frame is the constellation's view of a browsing context. /// Each browsing context has a session history, caused by /// navigation and traversing the history. Each frame has its /// current entry, plus past and future entries. The past is sorted /// chronologically, the future is sorted reverse chronologically: /// in particular prev.pop() is the latest past entry, and /// next.pop() is the earliest future entry. pub struct Frame { /// The frame id. pub id: FrameId, /// The timestamp for the current session history entry pub instant: Instant, /// The pipeline for the current session history entry pub pipeline_id: PipelineId, /// The load data for the current session history entry pub load_data: LoadData, /// The past session history, ordered chronologically. pub prev: Vec<FrameState>, /// The future session history, ordered reverse chronologically. pub next: Vec<FrameState>, } impl Frame { /// Create a new frame. /// Note this just creates the frame, it doesn't add it to the frame tree. pub fn new(id: FrameId, pipeline_id: PipelineId, load_data: LoadData) -> Frame { Frame { id: id, pipeline_id: pipeline_id, instant: Instant::now(), load_data: load_data, prev: vec!(), next: vec!(), } } /// Get the current frame state. pub fn current(&self) -> FrameState { FrameState { instant: self.instant, frame_id: self.id, pipeline_id: Some(self.pipeline_id), load_data: self.load_data.clone(), } } /// Set the current frame entry, and push the current frame entry into the past. pub fn load(&mut self, pipeline_id: PipelineId, load_data: LoadData) { let current = self.current(); self.prev.push(current); self.instant = Instant::now(); self.pipeline_id = pipeline_id; self.load_data = load_data; } /// Set the future to be empty. pub fn remove_forward_entries(&mut self) -> Vec<FrameState> { replace(&mut self.next, vec!()) } /// Update the current entry of the Frame from an entry that has been traversed to. pub fn update_current(&mut self, pipeline_id: PipelineId, entry: FrameState) { self.pipeline_id = pipeline_id; self.instant = entry.instant; self.load_data = entry.load_data; } } /// An entry in a frame's session history. /// Each entry stores the pipeline id for a document in the session history. /// /// When we operate on the joint session history, entries are sorted chronologically, /// so we timestamp the entries by when the entry was added to the session history. #[derive(Clone)] pub struct FrameState { /// The timestamp for when the session history entry was created pub instant: Instant, /// The pipeline for the document in the session history, /// None if the entry has been discarded pub pipeline_id: Option<PipelineId>, /// The load data for this entry, used to reload the pipeline if it has been discarded pub load_data: LoadData, /// The frame that this session history entry is part of pub frame_id: FrameId, } /// Represents a pending change in the frame tree, that will be applied /// once the new pipeline has loaded and completed initial layout / paint. pub struct FrameChange { /// The frame to change. 
pub frame_id: FrameId, /// The pipeline for the document being loaded. pub new_pipeline_id: PipelineId, /// The data for the document being loaded. pub load_data: LoadData, /// Is the new document replacing the current document (e.g. a reload) /// or pushing it into the session history (e.g. a navigation)? /// If it is replacing an existing entry, we store its timestamp. pub replace_instant: Option<Instant>, } /// An iterator over a frame tree, returning the fully active frames in /// depth-first order. Note that this iterator only returns the fully /// active frames, that is ones where every ancestor frame is /// in the currently active pipeline of its parent frame. pub struct FrameTreeIterator<'a> { /// The frames still to iterate over. pub stack: Vec<FrameId>, /// The set of all frames. pub frames: &'a HashMap<FrameId, Frame>, /// The set of all pipelines. We use this to find the active /// children of a frame, which are the iframes in the currently /// active document. pub pipelines: &'a HashMap<PipelineId, Pipeline>, } impl<'a> Iterator for FrameTreeIterator<'a> { type Item = &'a Frame; fn next(&mut self) -> Option<&'a Frame> { loop { let frame_id = match self.stack.pop() { Some(frame_id) => frame_id, None => return None, }; let frame = match self.frames.get(&frame_id) { Some(frame) => frame, None => { warn!("Frame {:?} iterated after closure.", frame_id); continue; }, }; let pipeline = match self.pipelines.get(&frame.pipeline_id) { Some(pipeline) => pipeline, None => { warn!("Pipeline {:?} iterated after closure.", frame.pipeline_id); continue; }, }; self.stack.extend(pipeline.children.iter()); return Some(frame) } } } /// An iterator over a frame tree, returning all frames in depth-first /// order. Note that this iterator returns all frames, not just the /// fully active ones. pub struct FullFrameTreeIterator<'a> { /// The frames still to iterate over. pub stack: Vec<FrameId>, /// The set of all frames. pub frames: &'a HashMap<FrameId, Frame>, /// The set of all pipelines. We use this to find the /// children of a frame, which are the iframes in all documents /// in the session history. pub pipelines: &'a HashMap<PipelineId, Pipeline>, } impl<'a> Iterator for FullFrameTreeIterator<'a> { type Item = &'a Frame; fn next(&mut self) -> Option<&'a Frame> {
}; let frame = match self.frames.get(&frame_id) { Some(frame) => frame, None => { warn!("Frame {:?} iterated after closure.", frame_id); continue; }, }; let child_frame_ids = frame.prev.iter().chain(frame.next.iter()) .filter_map(|entry| entry.pipeline_id) .chain(once(frame.pipeline_id)) .filter_map(|pipeline_id| pipelines.get(&pipeline_id)) .flat_map(|pipeline| pipeline.children.iter()); self.stack.extend(child_frame_ids); return Some(frame) } } }
let pipelines = self.pipelines; loop { let frame_id = match self.stack.pop() { Some(frame_id) => frame_id, None => return None,
random_line_split
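The Frame doc comment describes a session history as a chronologically sorted past stack, a current entry, and a reverse-chronological future stack, where prev.pop() is the latest past entry and a new load clears the future. A toy model of those moves (plain u32 ids stand in for FrameState entries):

    // Past stack, current entry, future stack; loading discards the future,
    // matching remove_forward_entries above.
    struct History {
        prev: Vec<u32>,
        current: u32,
        next: Vec<u32>,
    }

    impl History {
        fn load(&mut self, id: u32) {
            self.prev.push(self.current);
            self.current = id;
            self.next.clear();
        }

        fn go_back(&mut self) {
            if let Some(id) = self.prev.pop() {
                self.next.push(self.current);
                self.current = id;
            }
        }

        fn go_forward(&mut self) {
            if let Some(id) = self.next.pop() {
                self.prev.push(self.current);
                self.current = id;
            }
        }
    }

    fn main() {
        let mut h = History { prev: vec![], current: 1, next: vec![] };
        h.load(2);
        h.load(3);
        h.go_back();
        assert_eq!(h.current, 2);
        h.go_forward();
        assert_eq!(h.current, 3);
    }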
frame.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use msg::constellation_msg::{FrameId, PipelineId}; use pipeline::Pipeline; use script_traits::LoadData; use std::collections::HashMap; use std::iter::once; use std::mem::replace; use std::time::Instant; /// A frame in the frame tree. /// Each frame is the constellation's view of a browsing context. /// Each browsing context has a session history, caused by /// navigation and traversing the history. Each frame has its /// current entry, plus past and future entries. The past is sorted /// chronologically, the future is sorted reverse chronologically: /// in particular prev.pop() is the latest past entry, and /// next.pop() is the earliest future entry. pub struct Frame { /// The frame id. pub id: FrameId, /// The timestamp for the current session history entry pub instant: Instant, /// The pipeline for the current session history entry pub pipeline_id: PipelineId, /// The load data for the current session history entry pub load_data: LoadData, /// The past session history, ordered chronologically. pub prev: Vec<FrameState>, /// The future session history, ordered reverse chronologically. pub next: Vec<FrameState>, } impl Frame { /// Create a new frame. /// Note this just creates the frame, it doesn't add it to the frame tree. pub fn new(id: FrameId, pipeline_id: PipelineId, load_data: LoadData) -> Frame { Frame { id: id, pipeline_id: pipeline_id, instant: Instant::now(), load_data: load_data, prev: vec!(), next: vec!(), } } /// Get the current frame state. pub fn current(&self) -> FrameState { FrameState { instant: self.instant, frame_id: self.id, pipeline_id: Some(self.pipeline_id), load_data: self.load_data.clone(), } } /// Set the current frame entry, and push the current frame entry into the past. pub fn load(&mut self, pipeline_id: PipelineId, load_data: LoadData) { let current = self.current(); self.prev.push(current); self.instant = Instant::now(); self.pipeline_id = pipeline_id; self.load_data = load_data; } /// Set the future to be empty. pub fn
(&mut self) -> Vec<FrameState> { replace(&mut self.next, vec!()) } /// Update the current entry of the Frame from an entry that has been traversed to. pub fn update_current(&mut self, pipeline_id: PipelineId, entry: FrameState) { self.pipeline_id = pipeline_id; self.instant = entry.instant; self.load_data = entry.load_data; } } /// An entry in a frame's session history. /// Each entry stores the pipeline id for a document in the session history. /// /// When we operate on the joint session history, entries are sorted chronologically, /// so we timestamp the entries by when the entry was added to the session history. #[derive(Clone)] pub struct FrameState { /// The timestamp for when the session history entry was created pub instant: Instant, /// The pipeline for the document in the session history, /// None if the entry has been discarded pub pipeline_id: Option<PipelineId>, /// The load data for this entry, used to reload the pipeline if it has been discarded pub load_data: LoadData, /// The frame that this session history entry is part of pub frame_id: FrameId, } /// Represents a pending change in the frame tree, that will be applied /// once the new pipeline has loaded and completed initial layout / paint. pub struct FrameChange { /// The frame to change. pub frame_id: FrameId, /// The pipeline for the document being loaded. pub new_pipeline_id: PipelineId, /// The data for the document being loaded. pub load_data: LoadData, /// Is the new document replacing the current document (e.g. a reload) /// or pushing it into the session history (e.g. a navigation)? /// If it is replacing an existing entry, we store its timestamp. pub replace_instant: Option<Instant>, } /// An iterator over a frame tree, returning the fully active frames in /// depth-first order. Note that this iterator only returns the fully /// active frames, that is ones where every ancestor frame is /// in the currently active pipeline of its parent frame. pub struct FrameTreeIterator<'a> { /// The frames still to iterate over. pub stack: Vec<FrameId>, /// The set of all frames. pub frames: &'a HashMap<FrameId, Frame>, /// The set of all pipelines. We use this to find the active /// children of a frame, which are the iframes in the currently /// active document. pub pipelines: &'a HashMap<PipelineId, Pipeline>, } impl<'a> Iterator for FrameTreeIterator<'a> { type Item = &'a Frame; fn next(&mut self) -> Option<&'a Frame> { loop { let frame_id = match self.stack.pop() { Some(frame_id) => frame_id, None => return None, }; let frame = match self.frames.get(&frame_id) { Some(frame) => frame, None => { warn!("Frame {:?} iterated after closure.", frame_id); continue; }, }; let pipeline = match self.pipelines.get(&frame.pipeline_id) { Some(pipeline) => pipeline, None => { warn!("Pipeline {:?} iterated after closure.", frame.pipeline_id); continue; }, }; self.stack.extend(pipeline.children.iter()); return Some(frame) } } } /// An iterator over a frame tree, returning all frames in depth-first /// order. Note that this iterator returns all frames, not just the /// fully active ones. pub struct FullFrameTreeIterator<'a> { /// The frames still to iterate over. pub stack: Vec<FrameId>, /// The set of all frames. pub frames: &'a HashMap<FrameId, Frame>, /// The set of all pipelines. We use this to find the /// children of a frame, which are the iframes in all documents /// in the session history. 
pub pipelines: &'a HashMap<PipelineId, Pipeline>, } impl<'a> Iterator for FullFrameTreeIterator<'a> { type Item = &'a Frame; fn next(&mut self) -> Option<&'a Frame> { let pipelines = self.pipelines; loop { let frame_id = match self.stack.pop() { Some(frame_id) => frame_id, None => return None, }; let frame = match self.frames.get(&frame_id) { Some(frame) => frame, None => { warn!("Frame {:?} iterated after closure.", frame_id); continue; }, }; let child_frame_ids = frame.prev.iter().chain(frame.next.iter()) .filter_map(|entry| entry.pipeline_id) .chain(once(frame.pipeline_id)) .filter_map(|pipeline_id| pipelines.get(&pipeline_id)) .flat_map(|pipeline| pipeline.children.iter()); self.stack.extend(child_frame_ids); return Some(frame) } } }
remove_forward_entries
identifier_name
mutex.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A simple native mutex implementation. Warning: this API is likely //! to change soon. #![allow(dead_code)] use core::prelude::*; use alloc::boxed::Box; use rustrt::mutex; pub const LOCKED: uint = 1 << 0; pub const BLOCKED: uint = 1 << 1; /// A mutual exclusion primitive useful for protecting shared data /// /// This mutex will properly block tasks waiting for the lock to become /// available. The mutex can also be statically initialized or created via a /// `new` constructor. /// /// # Example /// /// ```rust,ignore /// use std::sync::mutex::Mutex; /// /// let m = Mutex::new(); /// let guard = m.lock(); /// // do some work /// drop(guard); // unlock the lock /// ``` pub struct Mutex { // Note that this static mutex is in a *box*, not inlined into the struct // itself. This is done for memory safety reasons with the usage of a // StaticNativeMutex inside the static mutex above. Once a native mutex has // been used once, its address can never change (it can't be moved). This // mutex type can be safely moved at any time, so to ensure that the native // mutex is used correctly we box the inner lock to give it a constant // address. lock: Box<StaticMutex>, } /// The static mutex type is provided to allow for static allocation of mutexes. /// /// Note that this is a separate type because using a Mutex correctly means that /// it needs to have a destructor run. In Rust, statics are not allowed to have /// destructors. As a result, a `StaticMutex` has one extra method when compared /// to a `Mutex`, a `destroy` method. This method is unsafe to call, and /// documentation can be found directly on the method. /// /// # Example /// /// ```rust,ignore /// use std::sync::mutex::{StaticMutex, MUTEX_INIT}; /// /// static LOCK: StaticMutex = MUTEX_INIT; /// /// { /// let _g = LOCK.lock(); /// // do some productive work /// } /// // lock is unlocked here. /// ``` pub struct StaticMutex { lock: mutex::StaticNativeMutex, } /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. #[must_use] pub struct Guard<'a> { guard: mutex::LockGuard<'a>, } fn lift_guard(guard: mutex::LockGuard) -> Guard { Guard { guard: guard } } /// Static initialization of a mutex. This constant can be used to initialize /// other mutex constants. pub const MUTEX_INIT: StaticMutex = StaticMutex { lock: mutex::NATIVE_MUTEX_INIT }; impl StaticMutex { /// Attempts to grab this lock, see `Mutex::try_lock` pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> { unsafe { self.lock.trylock().map(lift_guard) } } /// Acquires this lock, see `Mutex::lock` pub fn lock<'a>(&'a self) -> Guard<'a> { lift_guard(unsafe { self.lock.lock() }) } /// Deallocates resources associated with this static mutex. /// /// This method is unsafe because it provides no guarantees that there are /// no active users of this mutex, and safety is not guaranteed if there are /// active users of this mutex. /// /// This method is required to ensure that there are no memory leaks on /// *all* platforms. 
It may be the case that some platforms do not leak /// memory if this method is not called, but this is not guaranteed to be /// true on all platforms. pub unsafe fn destroy(&self) { self.lock.destroy() } } impl Mutex { /// Creates a new mutex in an unlocked state ready for use. pub fn new() -> Mutex { Mutex { lock: box StaticMutex { lock: unsafe { mutex::StaticNativeMutex::new() }, } } } /// Attempts to acquire this lock. /// /// If the lock could not be acquired at this time, then `None` is returned. /// Otherwise, an RAII guard is returned. The lock will be unlocked when the /// guard is dropped. /// /// This function does not block. pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> { self.lock.try_lock() } /// Acquires a mutex, blocking the current task until it is able to do so. /// /// This function will block the local task until it is available to acquire /// the mutex. Upon returning, the task is the only task with the mutex /// held. An RAII guard is returned to allow scoped unlock of the lock. When /// the guard goes out of scope, the mutex will be unlocked. pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() } } impl Drop for Mutex { fn drop(&mut self) { // This is actually safe b/c we know that there is no further usage of // this mutex (it's up to the user to arrange for a mutex to get // dropped, that's not our job) unsafe { self.lock.destroy() } } } #[cfg(test)] mod test { use prelude::*; use super::{Mutex, StaticMutex, MUTEX_INIT}; #[test] fn smoke() { let m = Mutex::new(); drop(m.lock()); drop(m.lock()); } #[test] fn smoke_static() { static M: StaticMutex = MUTEX_INIT; unsafe { drop(M.lock()); drop(M.lock()); M.destroy(); } } #[test] fn lots_and_lots() { static M: StaticMutex = MUTEX_INIT; static mut CNT: uint = 0; static J: uint = 1000; static K: uint = 3; fn inc() { for _ in range(0, J) { unsafe { let _g = M.lock(); CNT += 1; }
let (tx, rx) = channel(); for _ in range(0, K) { let tx2 = tx.clone(); spawn(proc() { inc(); tx2.send(()); }); let tx2 = tx.clone(); spawn(proc() { inc(); tx2.send(()); }); } drop(tx); for _ in range(0, 2 * K) { rx.recv(); } assert_eq!(unsafe {CNT}, J * K * 2); unsafe { M.destroy(); } } #[test] fn trylock() { let m = Mutex::new(); assert!(m.try_lock().is_some()); } }
} }
random_line_split
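The guard-based locking this record documents predates the stabilized std API. As a minimal sketch of the same RAII scoped-lock pattern with today's std::sync::Mutex (current std API, not this crate's types):

    use std::sync::Mutex;

    fn main() {
        let m = Mutex::new(0u32); // modern std mutexes own the data they guard
        {
            let mut guard = m.lock().unwrap(); // RAII guard, like Guard<'a> above
            *guard += 1;
        } // guard dropped here; the lock is released
        assert_eq!(*m.lock().unwrap(), 1);
    }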
mutex.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A simple native mutex implementation. Warning: this API is likely //! to change soon. #![allow(dead_code)] use core::prelude::*; use alloc::boxed::Box; use rustrt::mutex; pub const LOCKED: uint = 1 << 0; pub const BLOCKED: uint = 1 << 1; /// A mutual exclusion primitive useful for protecting shared data /// /// This mutex will properly block tasks waiting for the lock to become /// available. The mutex can also be statically initialized or created via a /// `new` constructor. /// /// # Example /// /// ```rust,ignore /// use std::sync::mutex::Mutex; /// /// let m = Mutex::new(); /// let guard = m.lock(); /// // do some work /// drop(guard); // unlock the lock /// ``` pub struct Mutex { // Note that this static mutex is in a *box*, not inlined into the struct // itself. This is done for memory safety reasons with the usage of a // StaticNativeMutex inside the static mutex above. Once a native mutex has // been used once, its address can never change (it can't be moved). This // mutex type can be safely moved at any time, so to ensure that the native // mutex is used correctly we box the inner lock to give it a constant // address. lock: Box<StaticMutex>, } /// The static mutex type is provided to allow for static allocation of mutexes. /// /// Note that this is a separate type because using a Mutex correctly means that /// it needs to have a destructor run. In Rust, statics are not allowed to have /// destructors. As a result, a `StaticMutex` has one extra method when compared /// to a `Mutex`, a `destroy` method. This method is unsafe to call, and /// documentation can be found directly on the method. /// /// # Example /// /// ```rust,ignore /// use std::sync::mutex::{StaticMutex, MUTEX_INIT}; /// /// static LOCK: StaticMutex = MUTEX_INIT; /// /// { /// let _g = LOCK.lock(); /// // do some productive work /// } /// // lock is unlocked here. /// ``` pub struct StaticMutex { lock: mutex::StaticNativeMutex, } /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. #[must_use] pub struct Guard<'a> { guard: mutex::LockGuard<'a>, } fn
(guard: mutex::LockGuard) -> Guard { Guard { guard: guard } } /// Static initialization of a mutex. This constant can be used to initialize /// other mutex constants. pub const MUTEX_INIT: StaticMutex = StaticMutex { lock: mutex::NATIVE_MUTEX_INIT }; impl StaticMutex { /// Attempts to grab this lock, see `Mutex::try_lock` pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> { unsafe { self.lock.trylock().map(lift_guard) } } /// Acquires this lock, see `Mutex::lock` pub fn lock<'a>(&'a self) -> Guard<'a> { lift_guard(unsafe { self.lock.lock() }) } /// Deallocates resources associated with this static mutex. /// /// This method is unsafe because it provides no guarantees that there are /// no active users of this mutex, and safety is not guaranteed if there are /// active users of this mutex. /// /// This method is required to ensure that there are no memory leaks on /// *all* platforms. It may be the case that some platforms do not leak /// memory if this method is not called, but this is not guaranteed to be /// true on all platforms. pub unsafe fn destroy(&self) { self.lock.destroy() } } impl Mutex { /// Creates a new mutex in an unlocked state ready for use. pub fn new() -> Mutex { Mutex { lock: box StaticMutex { lock: unsafe { mutex::StaticNativeMutex::new() }, } } } /// Attempts to acquire this lock. /// /// If the lock could not be acquired at this time, then `None` is returned. /// Otherwise, an RAII guard is returned. The lock will be unlocked when the /// guard is dropped. /// /// This function does not block. pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> { self.lock.try_lock() } /// Acquires a mutex, blocking the current task until it is able to do so. /// /// This function will block the local task until it is available to acquire /// the mutex. Upon returning, the task is the only task with the mutex /// held. An RAII guard is returned to allow scoped unlock of the lock. When /// the guard goes out of scope, the mutex will be unlocked. pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() } } impl Drop for Mutex { fn drop(&mut self) { // This is actually safe b/c we know that there is no further usage of // this mutex (it's up to the user to arrange for a mutex to get // dropped, that's not our job) unsafe { self.lock.destroy() } } } #[cfg(test)] mod test { use prelude::*; use super::{Mutex, StaticMutex, MUTEX_INIT}; #[test] fn smoke() { let m = Mutex::new(); drop(m.lock()); drop(m.lock()); } #[test] fn smoke_static() { static M: StaticMutex = MUTEX_INIT; unsafe { drop(M.lock()); drop(M.lock()); M.destroy(); } } #[test] fn lots_and_lots() { static M: StaticMutex = MUTEX_INIT; static mut CNT: uint = 0; static J: uint = 1000; static K: uint = 3; fn inc() { for _ in range(0, J) { unsafe { let _g = M.lock(); CNT += 1; } } } let (tx, rx) = channel(); for _ in range(0, K) { let tx2 = tx.clone(); spawn(proc() { inc(); tx2.send(()); }); let tx2 = tx.clone(); spawn(proc() { inc(); tx2.send(()); }); } drop(tx); for _ in range(0, 2 * K) { rx.recv(); } assert_eq!(unsafe {CNT}, J * K * 2); unsafe { M.destroy(); } } #[test] fn trylock() { let m = Mutex::new(); assert!(m.try_lock().is_some()); } }
lift_guard
identifier_name
dom.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Types and traits used to access the DOM from style calculation. #![allow(unsafe_code)] use {Atom, Namespace, LocalName}; use atomic_refcell::{AtomicRef, AtomicRefCell}; use data::{ElementStyles, ElementData}; use element_state::ElementState; use parking_lot::RwLock; use properties::{ComputedValues, PropertyDeclarationBlock}; use properties::longhands::display::computed_value as display; use restyle_hints::{RESTYLE_DESCENDANTS, RESTYLE_LATER_SIBLINGS, RESTYLE_SELF, RestyleHint}; use selector_parser::{ElementExt, PseudoElement, RestyleDamage}; use sink::Push; use std::fmt::Debug; use std::ops::BitOr; use std::sync::Arc; use stylist::ApplicableDeclarationBlock; use traversal::DomTraversalContext; use util::opts; pub use style_traits::UnsafeNode; /// An opaque handle to a node, which, unlike UnsafeNode, cannot be transformed /// back into a non-opaque representation. The only safe operation that can be /// performed on this node is to compare it to another opaque handle or to another /// OpaqueNode. /// /// Layout and Graphics use this to safely represent nodes for comparison purposes. /// Because the script task's GC does not trace layout, node data cannot be safely stored in layout /// data structures. Also, layout code tends to be faster when the DOM is not being accessed, for /// locality reasons. Using `OpaqueNode` enforces this invariant. #[derive(Clone, PartialEq, Copy, Debug, Hash, Eq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf, Deserialize, Serialize))] pub struct OpaqueNode(pub usize); impl OpaqueNode { /// Returns the address of this node, for debugging purposes. #[inline] pub fn id(&self) -> usize { self.0 } } #[derive(Clone, Copy, PartialEq)] pub enum StylingMode { /// The node has never been styled before, and needs a full style computation. Initial, /// The node has been styled before, but needs some amount of recomputation. Restyle, /// The node does not need any style processing, but one or more of its /// descendants do. Traverse, /// No nodes in this subtree require style processing. Stop, } pub trait TRestyleDamage : Debug + PartialEq + BitOr<Output=Self> + Copy { /// The source for our current computed values in the cascade. This is a /// ComputedValues in Servo and a StyleContext in Gecko. /// /// This is needed because Gecko has a few optimisations for the calculation /// of the difference depending on which values have been used during /// layout. /// /// This should be obtained via TNode::existing_style_for_restyle_damage type PreExistingComputedValues; fn compute(old: &Self::PreExistingComputedValues, new: &Arc<ComputedValues>) -> Self; fn empty() -> Self; fn rebuild_and_reflow() -> Self; } /// Simple trait to provide basic information about the type of an element. /// /// We avoid exposing the full type id, since computing it in the general case /// would be difficult for Gecko nodes. pub trait NodeInfo { fn is_element(&self) -> bool; fn is_text_node(&self) -> bool; // Comments, doctypes, etc are ignored by layout algorithms. fn needs_layout(&self) -> bool { self.is_element() || self.is_text_node() } } pub struct LayoutIterator<T>(pub T); impl<T, I> Iterator for LayoutIterator<T> where T: Iterator<Item=I>, I: NodeInfo { type Item = I; fn next(&mut self) -> Option<I> { loop { // Filter out nodes that layout should ignore. 
let n = self.0.next(); if n.is_none() || n.as_ref().unwrap().needs_layout() { return n } } } } pub trait TNode : Sized + Copy + Clone + NodeInfo { type ConcreteElement: TElement<ConcreteNode = Self>; type ConcreteChildrenIterator: Iterator<Item = Self>; fn to_unsafe(&self) -> UnsafeNode; unsafe fn from_unsafe(n: &UnsafeNode) -> Self; fn dump(self); fn dump_style(self); /// Returns an iterator over this node's children. fn children(self) -> LayoutIterator<Self::ConcreteChildrenIterator>; /// Converts self into an `OpaqueNode`. fn opaque(&self) -> OpaqueNode; /// While doing a reflow, the node at the root has no parent, as far as we're /// concerned. This method returns `None` at the reflow root. fn layout_parent_element(self, reflow_root: OpaqueNode) -> Option<Self::ConcreteElement>; fn debug_id(self) -> usize; fn as_element(&self) -> Option<Self::ConcreteElement>; fn needs_dirty_on_viewport_size_changed(&self) -> bool; unsafe fn set_dirty_on_viewport_size_changed(&self); fn can_be_fragmented(&self) -> bool; unsafe fn set_can_be_fragmented(&self, value: bool); fn parent_node(&self) -> Option<Self>; fn first_child(&self) -> Option<Self>; fn last_child(&self) -> Option<Self>; fn prev_sibling(&self) -> Option<Self>; fn next_sibling(&self) -> Option<Self>; } pub trait PresentationalHintsSynthetizer { fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, hints: &mut V) where V: Push<ApplicableDeclarationBlock>; } pub trait TElement : PartialEq + Debug + Sized + Copy + Clone + ElementExt + PresentationalHintsSynthetizer { type ConcreteNode: TNode<ConcreteElement = Self>; fn as_node(&self) -> Self::ConcreteNode; fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>>; fn get_state(&self) -> ElementState; fn has_attr(&self, namespace: &Namespace, attr: &LocalName) -> bool; fn attr_equals(&self, namespace: &Namespace, attr: &LocalName, value: &Atom) -> bool; /// Set the restyle damage field. fn set_restyle_damage(self, damage: RestyleDamage); /// XXX: It's a bit unfortunate we need to pass the current computed values /// as an argument here, but otherwise Servo would crash due to double /// borrows to return it. fn existing_style_for_restyle_damage<'a>(&'a self, current_computed_values: Option<&'a Arc<ComputedValues>>, pseudo: Option<&PseudoElement>) -> Option<&'a <RestyleDamage as TRestyleDamage>::PreExistingComputedValues>; /// The concept of a dirty bit doesn't exist in our new restyle algorithm. /// Instead, we associate restyle and change hints with nodes. However, we /// continue to allow the dirty bit to trigger unconditional restyles while /// we transition both Servo and Stylo to the new architecture. fn deprecated_dirty_bit_is_set(&self) -> bool; fn has_dirty_descendants(&self) -> bool; unsafe fn set_dirty_descendants(&self); /// Atomically stores the number of children of this node that we will /// need to process during bottom-up traversal. fn store_children_to_process(&self, n: isize); /// Atomically notes that a child has been processed during bottom-up /// traversal. Returns the number of children left to process. fn did_process_child(&self) -> isize; /// Returns true if this element's current style is display:none. Only valid /// to call after styling. fn is_display_none(&self) -> bool { self.borrow_data().unwrap() .current_styles().primary .get_box().clone_display() == display::T::none } /// Returns true if this node has a styled layout frame that owns the style. 
fn frame_has_style(&self) -> bool { false } /// Returns the styles from the layout frame that owns them, if any. /// /// FIXME(bholley): Once we start dropping ElementData from nodes when /// creating frames, we'll want to teach this method to actually get /// style data from the frame. fn get_styles_from_frame(&self) -> Option<ElementStyles> { None } /// Returns the styling mode for this node. This is only valid to call before /// and during restyling, before finish_styling is invoked. /// /// See the comments around StylingMode. fn styling_mode(&self) -> StylingMode { use self::StylingMode::*; // Non-incremental layout impersonates Initial. if opts::get().nonincremental_layout { return Initial; } // Compute the default result if this node doesn't require processing. let mode_for_descendants = if self.has_dirty_descendants() { Traverse } else { Stop }; let mut mode = match self.borrow_data() { // No element data, no style on the frame. None if !self.frame_has_style() => Initial, // No element data, style on the frame. None => mode_for_descendants, // We have element data. Decide below. Some(d) => { if d.has_current_styles() { // The element has up-to-date style. debug_assert!(!self.frame_has_style()); debug_assert!(d.restyle_data.is_none()); mode_for_descendants } else { // The element needs processing. if d.previous_styles().is_some() { Restyle } else { Initial } } }, }; // Handle the deprecated dirty bit. This should go away soon. if mode != Initial && self.deprecated_dirty_bit_is_set() { mode = Restyle; } mode } /// Immutably borrows the ElementData. fn borrow_data(&self) -> Option<AtomicRef<ElementData>>; /// Gets a reference to the ElementData container. fn get_data(&self) -> Option<&AtomicRefCell<ElementData>>; /// Properly marks nodes as dirty in response to restyle hints. fn
<C: DomTraversalContext<Self::ConcreteNode>>(&self, hint: RestyleHint) { // Bail early if there's no restyling to do. if hint.is_empty() { return; } // If the restyle hint is non-empty, we need to restyle either this element // or one of its siblings. Mark our ancestor chain as having dirty descendants. let mut curr = *self; while let Some(parent) = curr.parent_element() { if parent.has_dirty_descendants() { break } unsafe { parent.set_dirty_descendants(); } curr = parent; } // Process hints. if hint.contains(RESTYLE_SELF) { unsafe { let _ = C::prepare_for_styling(self); } // XXX(emilio): For now, dirty implies dirty descendants if found. } else if hint.contains(RESTYLE_DESCENDANTS) { unsafe { self.set_dirty_descendants(); } let mut current = self.first_child_element(); while let Some(el) = current { unsafe { let _ = C::prepare_for_styling(&el); } current = el.next_sibling_element(); } } if hint.contains(RESTYLE_LATER_SIBLINGS) { let mut next = ::selectors::Element::next_sibling_element(self); while let Some(sib) = next { unsafe { let _ = C::prepare_for_styling(&sib); } next = ::selectors::Element::next_sibling_element(&sib); } } } }
note_restyle_hint
identifier_name
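The ancestor walk in note_restyle_hint stops as soon as it meets an already-marked parent, since that parent's own ancestors were marked when it was. A self-contained sketch of that early-exit propagation over a toy tree (illustrative names, not Servo's real types):

    use std::cell::Cell;

    struct Node { parent: Option<usize>, dirty_descendants: Cell<bool> }

    // Mirror of the `while let Some(parent) = curr.parent_element()` loop above.
    fn mark_ancestors(nodes: &[Node], mut idx: usize) {
        while let Some(p) = nodes[idx].parent {
            if nodes[p].dirty_descendants.get() { break; } // already propagated upward
            nodes[p].dirty_descendants.set(true);
            idx = p;
        }
    }

    fn main() {
        let nodes = [
            Node { parent: None, dirty_descendants: Cell::new(false) },
            Node { parent: Some(0), dirty_descendants: Cell::new(false) },
            Node { parent: Some(1), dirty_descendants: Cell::new(false) },
        ];
        mark_ancestors(&nodes, 2);
        assert!(nodes[0].dirty_descendants.get() && nodes[1].dirty_descendants.get());
    }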
dom.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Types and traits used to access the DOM from style calculation. #![allow(unsafe_code)] use {Atom, Namespace, LocalName}; use atomic_refcell::{AtomicRef, AtomicRefCell}; use data::{ElementStyles, ElementData}; use element_state::ElementState; use parking_lot::RwLock; use properties::{ComputedValues, PropertyDeclarationBlock}; use properties::longhands::display::computed_value as display; use restyle_hints::{RESTYLE_DESCENDANTS, RESTYLE_LATER_SIBLINGS, RESTYLE_SELF, RestyleHint}; use selector_parser::{ElementExt, PseudoElement, RestyleDamage}; use sink::Push; use std::fmt::Debug; use std::ops::BitOr; use std::sync::Arc; use stylist::ApplicableDeclarationBlock; use traversal::DomTraversalContext; use util::opts; pub use style_traits::UnsafeNode; /// An opaque handle to a node, which, unlike UnsafeNode, cannot be transformed /// back into a non-opaque representation. The only safe operation that can be /// performed on this node is to compare it to another opaque handle or to another /// OpaqueNode. /// /// Layout and Graphics use this to safely represent nodes for comparison purposes. /// Because the script task's GC does not trace layout, node data cannot be safely stored in layout /// data structures. Also, layout code tends to be faster when the DOM is not being accessed, for /// locality reasons. Using `OpaqueNode` enforces this invariant. #[derive(Clone, PartialEq, Copy, Debug, Hash, Eq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf, Deserialize, Serialize))] pub struct OpaqueNode(pub usize); impl OpaqueNode { /// Returns the address of this node, for debugging purposes. #[inline] pub fn id(&self) -> usize { self.0 } } #[derive(Clone, Copy, PartialEq)] pub enum StylingMode { /// The node has never been styled before, and needs a full style computation. Initial, /// The node has been styled before, but needs some amount of recomputation. Restyle, /// The node does not need any style processing, but one or more of its /// descendants do. Traverse, /// No nodes in this subtree require style processing. Stop, } pub trait TRestyleDamage : Debug + PartialEq + BitOr<Output=Self> + Copy { /// The source for our current computed values in the cascade. This is a /// ComputedValues in Servo and a StyleContext in Gecko. /// /// This is needed because Gecko has a few optimisations for the calculation /// of the difference depending on which values have been used during /// layout. /// /// This should be obtained via TNode::existing_style_for_restyle_damage type PreExistingComputedValues; fn compute(old: &Self::PreExistingComputedValues, new: &Arc<ComputedValues>) -> Self; fn empty() -> Self; fn rebuild_and_reflow() -> Self; } /// Simple trait to provide basic information about the type of an element. /// /// We avoid exposing the full type id, since computing it in the general case /// would be difficult for Gecko nodes. pub trait NodeInfo { fn is_element(&self) -> bool; fn is_text_node(&self) -> bool; // Comments, doctypes, etc are ignored by layout algorithms. fn needs_layout(&self) -> bool { self.is_element() || self.is_text_node() } } pub struct LayoutIterator<T>(pub T); impl<T, I> Iterator for LayoutIterator<T> where T: Iterator<Item=I>, I: NodeInfo { type Item = I; fn next(&mut self) -> Option<I> { loop { // Filter out nodes that layout should ignore. 
let n = self.0.next(); if n.is_none() || n.as_ref().unwrap().needs_layout() { return n } } } } pub trait TNode : Sized + Copy + Clone + NodeInfo { type ConcreteElement: TElement<ConcreteNode = Self>; type ConcreteChildrenIterator: Iterator<Item = Self>; fn to_unsafe(&self) -> UnsafeNode; unsafe fn from_unsafe(n: &UnsafeNode) -> Self; fn dump(self); fn dump_style(self); /// Returns an iterator over this node's children. fn children(self) -> LayoutIterator<Self::ConcreteChildrenIterator>; /// Converts self into an `OpaqueNode`. fn opaque(&self) -> OpaqueNode; /// While doing a reflow, the node at the root has no parent, as far as we're /// concerned. This method returns `None` at the reflow root. fn layout_parent_element(self, reflow_root: OpaqueNode) -> Option<Self::ConcreteElement>; fn debug_id(self) -> usize; fn as_element(&self) -> Option<Self::ConcreteElement>; fn needs_dirty_on_viewport_size_changed(&self) -> bool; unsafe fn set_dirty_on_viewport_size_changed(&self); fn can_be_fragmented(&self) -> bool; unsafe fn set_can_be_fragmented(&self, value: bool); fn parent_node(&self) -> Option<Self>; fn first_child(&self) -> Option<Self>; fn last_child(&self) -> Option<Self>; fn prev_sibling(&self) -> Option<Self>; fn next_sibling(&self) -> Option<Self>; } pub trait PresentationalHintsSynthetizer { fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, hints: &mut V) where V: Push<ApplicableDeclarationBlock>; } pub trait TElement : PartialEq + Debug + Sized + Copy + Clone + ElementExt + PresentationalHintsSynthetizer { type ConcreteNode: TNode<ConcreteElement = Self>; fn as_node(&self) -> Self::ConcreteNode; fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>>; fn get_state(&self) -> ElementState; fn has_attr(&self, namespace: &Namespace, attr: &LocalName) -> bool; fn attr_equals(&self, namespace: &Namespace, attr: &LocalName, value: &Atom) -> bool; /// Set the restyle damage field. fn set_restyle_damage(self, damage: RestyleDamage); /// XXX: It's a bit unfortunate we need to pass the current computed values /// as an argument here, but otherwise Servo would crash due to double /// borrows to return it. fn existing_style_for_restyle_damage<'a>(&'a self, current_computed_values: Option<&'a Arc<ComputedValues>>, pseudo: Option<&PseudoElement>) -> Option<&'a <RestyleDamage as TRestyleDamage>::PreExistingComputedValues>; /// The concept of a dirty bit doesn't exist in our new restyle algorithm. /// Instead, we associate restyle and change hints with nodes. However, we /// continue to allow the dirty bit to trigger unconditional restyles while /// we transition both Servo and Stylo to the new architecture. fn deprecated_dirty_bit_is_set(&self) -> bool; fn has_dirty_descendants(&self) -> bool; unsafe fn set_dirty_descendants(&self); /// Atomically stores the number of children of this node that we will /// need to process during bottom-up traversal. fn store_children_to_process(&self, n: isize); /// Atomically notes that a child has been processed during bottom-up /// traversal. Returns the number of children left to process. fn did_process_child(&self) -> isize;
.get_box().clone_display() == display::T::none } /// Returns true if this node has a styled layout frame that owns the style. fn frame_has_style(&self) -> bool { false } /// Returns the styles from the layout frame that owns them, if any. /// /// FIXME(bholley): Once we start dropping ElementData from nodes when /// creating frames, we'll want to teach this method to actually get /// style data from the frame. fn get_styles_from_frame(&self) -> Option<ElementStyles> { None } /// Returns the styling mode for this node. This is only valid to call before /// and during restyling, before finish_styling is invoked. /// /// See the comments around StylingMode. fn styling_mode(&self) -> StylingMode { use self::StylingMode::*; // Non-incremental layout impersonates Initial. if opts::get().nonincremental_layout { return Initial; } // Compute the default result if this node doesn't require processing. let mode_for_descendants = if self.has_dirty_descendants() { Traverse } else { Stop }; let mut mode = match self.borrow_data() { // No element data, no style on the frame. None if !self.frame_has_style() => Initial, // No element data, style on the frame. None => mode_for_descendants, // We have element data. Decide below. Some(d) => { if d.has_current_styles() { // The element has up-to-date style. debug_assert!(!self.frame_has_style()); debug_assert!(d.restyle_data.is_none()); mode_for_descendants } else { // The element needs processing. if d.previous_styles().is_some() { Restyle } else { Initial } } }, }; // Handle the deprecated dirty bit. This should go away soon. if mode != Initial && self.deprecated_dirty_bit_is_set() { mode = Restyle; } mode } /// Immutably borrows the ElementData. fn borrow_data(&self) -> Option<AtomicRef<ElementData>>; /// Gets a reference to the ElementData container. fn get_data(&self) -> Option<&AtomicRefCell<ElementData>>; /// Properly marks nodes as dirty in response to restyle hints. fn note_restyle_hint<C: DomTraversalContext<Self::ConcreteNode>>(&self, hint: RestyleHint) { // Bail early if there's no restyling to do. if hint.is_empty() { return; } // If the restyle hint is non-empty, we need to restyle either this element // or one of its siblings. Mark our ancestor chain as having dirty descendants. let mut curr = *self; while let Some(parent) = curr.parent_element() { if parent.has_dirty_descendants() { break } unsafe { parent.set_dirty_descendants(); } curr = parent; } // Process hints. if hint.contains(RESTYLE_SELF) { unsafe { let _ = C::prepare_for_styling(self); } // XXX(emilio): For now, dirty implies dirty descendants if found. } else if hint.contains(RESTYLE_DESCENDANTS) { unsafe { self.set_dirty_descendants(); } let mut current = self.first_child_element(); while let Some(el) = current { unsafe { let _ = C::prepare_for_styling(&el); } current = el.next_sibling_element(); } } if hint.contains(RESTYLE_LATER_SIBLINGS) { let mut next = ::selectors::Element::next_sibling_element(self); while let Some(sib) = next { unsafe { let _ = C::prepare_for_styling(&sib); } next = ::selectors::Element::next_sibling_element(&sib); } } } }
/// Returns true if this element's current style is display:none. Only valid /// to call after styling. fn is_display_none(&self) -> bool { self.borrow_data().unwrap() .current_styles().primary
random_line_split
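The skip-until-interesting loop in LayoutIterator::next is, in effect, a hand-rolled Iterator::find over the wrapped iterator. A toy demonstration of the equivalence:

    fn main() {
        let kinds = ["comment", "doctype", "element", "text"];
        // find() keeps pulling items until one passes or the source runs dry,
        // which is exactly what the loop in LayoutIterator::next does.
        let first = kinds.iter().find(|k| **k == "element" || **k == "text");
        assert_eq!(first, Some(&"element"));
    }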
dom.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Types and traits used to access the DOM from style calculation. #![allow(unsafe_code)] use {Atom, Namespace, LocalName}; use atomic_refcell::{AtomicRef, AtomicRefCell}; use data::{ElementStyles, ElementData}; use element_state::ElementState; use parking_lot::RwLock; use properties::{ComputedValues, PropertyDeclarationBlock}; use properties::longhands::display::computed_value as display; use restyle_hints::{RESTYLE_DESCENDANTS, RESTYLE_LATER_SIBLINGS, RESTYLE_SELF, RestyleHint}; use selector_parser::{ElementExt, PseudoElement, RestyleDamage}; use sink::Push; use std::fmt::Debug; use std::ops::BitOr; use std::sync::Arc; use stylist::ApplicableDeclarationBlock; use traversal::DomTraversalContext; use util::opts; pub use style_traits::UnsafeNode; /// An opaque handle to a node, which, unlike UnsafeNode, cannot be transformed /// back into a non-opaque representation. The only safe operation that can be /// performed on this node is to compare it to another opaque handle or to another /// OpaqueNode. /// /// Layout and Graphics use this to safely represent nodes for comparison purposes. /// Because the script task's GC does not trace layout, node data cannot be safely stored in layout /// data structures. Also, layout code tends to be faster when the DOM is not being accessed, for /// locality reasons. Using `OpaqueNode` enforces this invariant. #[derive(Clone, PartialEq, Copy, Debug, Hash, Eq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf, Deserialize, Serialize))] pub struct OpaqueNode(pub usize); impl OpaqueNode { /// Returns the address of this node, for debugging purposes. #[inline] pub fn id(&self) -> usize { self.0 } } #[derive(Clone, Copy, PartialEq)] pub enum StylingMode { /// The node has never been styled before, and needs a full style computation. Initial, /// The node has been styled before, but needs some amount of recomputation. Restyle, /// The node does not need any style processing, but one or more of its /// descendants do. Traverse, /// No nodes in this subtree require style processing. Stop, } pub trait TRestyleDamage : Debug + PartialEq + BitOr<Output=Self> + Copy { /// The source for our current computed values in the cascade. This is a /// ComputedValues in Servo and a StyleContext in Gecko. /// /// This is needed because Gecko has a few optimisations for the calculation /// of the difference depending on which values have been used during /// layout. /// /// This should be obtained via TNode::existing_style_for_restyle_damage type PreExistingComputedValues; fn compute(old: &Self::PreExistingComputedValues, new: &Arc<ComputedValues>) -> Self; fn empty() -> Self; fn rebuild_and_reflow() -> Self; } /// Simple trait to provide basic information about the type of an element. /// /// We avoid exposing the full type id, since computing it in the general case /// would be difficult for Gecko nodes. pub trait NodeInfo { fn is_element(&self) -> bool; fn is_text_node(&self) -> bool; // Comments, doctypes, etc are ignored by layout algorithms. fn needs_layout(&self) -> bool
} pub struct LayoutIterator<T>(pub T); impl<T, I> Iterator for LayoutIterator<T> where T: Iterator<Item=I>, I: NodeInfo { type Item = I; fn next(&mut self) -> Option<I> { loop { // Filter out nodes that layout should ignore. let n = self.0.next(); if n.is_none() || n.as_ref().unwrap().needs_layout() { return n } } } } pub trait TNode : Sized + Copy + Clone + NodeInfo { type ConcreteElement: TElement<ConcreteNode = Self>; type ConcreteChildrenIterator: Iterator<Item = Self>; fn to_unsafe(&self) -> UnsafeNode; unsafe fn from_unsafe(n: &UnsafeNode) -> Self; fn dump(self); fn dump_style(self); /// Returns an iterator over this node's children. fn children(self) -> LayoutIterator<Self::ConcreteChildrenIterator>; /// Converts self into an `OpaqueNode`. fn opaque(&self) -> OpaqueNode; /// While doing a reflow, the node at the root has no parent, as far as we're /// concerned. This method returns `None` at the reflow root. fn layout_parent_element(self, reflow_root: OpaqueNode) -> Option<Self::ConcreteElement>; fn debug_id(self) -> usize; fn as_element(&self) -> Option<Self::ConcreteElement>; fn needs_dirty_on_viewport_size_changed(&self) -> bool; unsafe fn set_dirty_on_viewport_size_changed(&self); fn can_be_fragmented(&self) -> bool; unsafe fn set_can_be_fragmented(&self, value: bool); fn parent_node(&self) -> Option<Self>; fn first_child(&self) -> Option<Self>; fn last_child(&self) -> Option<Self>; fn prev_sibling(&self) -> Option<Self>; fn next_sibling(&self) -> Option<Self>; } pub trait PresentationalHintsSynthetizer { fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, hints: &mut V) where V: Push<ApplicableDeclarationBlock>; } pub trait TElement : PartialEq + Debug + Sized + Copy + Clone + ElementExt + PresentationalHintsSynthetizer { type ConcreteNode: TNode<ConcreteElement = Self>; fn as_node(&self) -> Self::ConcreteNode; fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>>; fn get_state(&self) -> ElementState; fn has_attr(&self, namespace: &Namespace, attr: &LocalName) -> bool; fn attr_equals(&self, namespace: &Namespace, attr: &LocalName, value: &Atom) -> bool; /// Set the restyle damage field. fn set_restyle_damage(self, damage: RestyleDamage); /// XXX: It's a bit unfortunate we need to pass the current computed values /// as an argument here, but otherwise Servo would crash due to double /// borrows to return it. fn existing_style_for_restyle_damage<'a>(&'a self, current_computed_values: Option<&'a Arc<ComputedValues>>, pseudo: Option<&PseudoElement>) -> Option<&'a <RestyleDamage as TRestyleDamage>::PreExistingComputedValues>; /// The concept of a dirty bit doesn't exist in our new restyle algorithm. /// Instead, we associate restyle and change hints with nodes. However, we /// continue to allow the dirty bit to trigger unconditional restyles while /// we transition both Servo and Stylo to the new architecture. fn deprecated_dirty_bit_is_set(&self) -> bool; fn has_dirty_descendants(&self) -> bool; unsafe fn set_dirty_descendants(&self); /// Atomically stores the number of children of this node that we will /// need to process during bottom-up traversal. fn store_children_to_process(&self, n: isize); /// Atomically notes that a child has been processed during bottom-up /// traversal. Returns the number of children left to process. fn did_process_child(&self) -> isize; /// Returns true if this element's current style is display:none. Only valid /// to call after styling. 
fn is_display_none(&self) -> bool { self.borrow_data().unwrap() .current_styles().primary .get_box().clone_display() == display::T::none } /// Returns true if this node has a styled layout frame that owns the style. fn frame_has_style(&self) -> bool { false } /// Returns the styles from the layout frame that owns them, if any. /// /// FIXME(bholley): Once we start dropping ElementData from nodes when /// creating frames, we'll want to teach this method to actually get /// style data from the frame. fn get_styles_from_frame(&self) -> Option<ElementStyles> { None } /// Returns the styling mode for this node. This is only valid to call before /// and during restyling, before finish_styling is invoked. /// /// See the comments around StylingMode. fn styling_mode(&self) -> StylingMode { use self::StylingMode::*; // Non-incremental layout impersonates Initial. if opts::get().nonincremental_layout { return Initial; } // Compute the default result if this node doesn't require processing. let mode_for_descendants = if self.has_dirty_descendants() { Traverse } else { Stop }; let mut mode = match self.borrow_data() { // No element data, no style on the frame. None if !self.frame_has_style() => Initial, // No element data, style on the frame. None => mode_for_descendants, // We have element data. Decide below. Some(d) => { if d.has_current_styles() { // The element has up-to-date style. debug_assert!(!self.frame_has_style()); debug_assert!(d.restyle_data.is_none()); mode_for_descendants } else { // The element needs processing. if d.previous_styles().is_some() { Restyle } else { Initial } } }, }; // Handle the deprecated dirty bit. This should go away soon. if mode != Initial && self.deprecated_dirty_bit_is_set() { mode = Restyle; } mode } /// Immutably borrows the ElementData. fn borrow_data(&self) -> Option<AtomicRef<ElementData>>; /// Gets a reference to the ElementData container. fn get_data(&self) -> Option<&AtomicRefCell<ElementData>>; /// Properly marks nodes as dirty in response to restyle hints. fn note_restyle_hint<C: DomTraversalContext<Self::ConcreteNode>>(&self, hint: RestyleHint) { // Bail early if there's no restyling to do. if hint.is_empty() { return; } // If the restyle hint is non-empty, we need to restyle either this element // or one of its siblings. Mark our ancestor chain as having dirty descendants. let mut curr = *self; while let Some(parent) = curr.parent_element() { if parent.has_dirty_descendants() { break } unsafe { parent.set_dirty_descendants(); } curr = parent; } // Process hints. if hint.contains(RESTYLE_SELF) { unsafe { let _ = C::prepare_for_styling(self); } // XXX(emilio): For now, dirty implies dirty descendants if found. } else if hint.contains(RESTYLE_DESCENDANTS) { unsafe { self.set_dirty_descendants(); } let mut current = self.first_child_element(); while let Some(el) = current { unsafe { let _ = C::prepare_for_styling(&el); } current = el.next_sibling_element(); } } if hint.contains(RESTYLE_LATER_SIBLINGS) { let mut next = ::selectors::Element::next_sibling_element(self); while let Some(sib) = next { unsafe { let _ = C::prepare_for_styling(&sib); } next = ::selectors::Element::next_sibling_element(&sib); } } } }
{ self.is_element() || self.is_text_node() }
identifier_body
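The masked span in this row is the provided body of needs_layout, a default trait method built from the two required primitives. The same pattern in miniature:

    trait NodeInfo {
        fn is_element(&self) -> bool;
        fn is_text_node(&self) -> bool;
        // Default body: implementors only supply the primitives above.
        fn needs_layout(&self) -> bool { self.is_element() || self.is_text_node() }
    }

    struct Comment;
    impl NodeInfo for Comment {
        fn is_element(&self) -> bool { false }
        fn is_text_node(&self) -> bool { false }
    }

    fn main() { assert!(!Comment.needs_layout()); }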
mod.rs
pub fn solve(input: &String) -> String { let input = input.lines().nth(0).unwrap().to_string(); format!( "Skip 1: {} Skip n/2: {}", _solve(&input, &1), _solve(&input, &(input.len() / 2)) ) } fn _solve(input: &String, skip: &usize) -> usize { let nums: Vec<usize> = input.chars().map(|x| x as usize - '0' as usize).collect(); let mut sum = 0; let l = nums.len(); for (i, m) in nums.iter().enumerate() { let n = nums[(i + *skip as usize) % l]; if n == *m { sum += nums[i]; } } sum } #[cfg(test)] mod test { use day1; #[test] fn test_p1_1() { assert_eq!(day1::_solve(&"1122".to_string(), &1), 3); } #[test] fn
() { assert_eq!(day1::_solve(&"1111".to_string(), &1), 4); } #[test] fn test_p1_3() { assert_eq!(day1::_solve(&"1234".to_string(), &1), 0); } #[test] fn test_p1_4() { assert_eq!(day1::_solve(&"91212129".to_string(), &1), 9); } #[test] fn test_p2_1() { assert_eq!(day1::_solve(&"1212".to_string(), &2), 6); } #[test] fn test_p2_2() { assert_eq!(day1::_solve(&"1221".to_string(), &2), 0); } #[test] fn test_p2_3() { assert_eq!(day1::_solve(&"123425".to_string(), &3), 4); } #[test] fn test_p2_4() { assert_eq!(day1::_solve(&"12131415".to_string(), &4), 4); } }
test_p1_2
identifier_name
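The `_solve` routine above sums every digit that equals its cyclic neighbor `skip` positions ahead (the Advent of Code 2017 day 1 captcha). An equivalent iterator-style sketch, shown only for clarity:

    fn solve(digits: &[u32], skip: usize) -> u32 {
        let n = digits.len();
        (0..n)
            .filter(|&i| digits[i] == digits[(i + skip) % n]) // compare with cyclic neighbor
            .map(|i| digits[i])
            .sum()
    }

    fn main() {
        assert_eq!(solve(&[1, 1, 2, 2], 1), 3); // matches test_p1_1
        assert_eq!(solve(&[1, 2, 1, 2], 2), 6); // matches test_p2_1
    }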
mod.rs
pub fn solve(input: &String) -> String { let input = input.lines().nth(0).unwrap().to_string(); format!( "Skip 1: {} Skip n/2: {}", _solve(&input, &1), _solve(&input, &(input.len() / 2)) ) } fn _solve(input: &String, skip: &usize) -> usize { let nums: Vec<usize> = input.chars().map(|x| x as usize - '0' as usize).collect(); let mut sum = 0; let l = nums.len(); for (i, m) in nums.iter().enumerate() { let n = nums[(i + *skip as usize) % l]; if n == *m { sum += nums[i]; } } sum } #[cfg(test)] mod test { use day1; #[test] fn test_p1_1() { assert_eq!(day1::_solve(&"1122".to_string(), &1), 3);
#[test] fn test_p1_2() { assert_eq!(day1::_solve(&"1111".to_string(), &1), 4); } #[test] fn test_p1_3() { assert_eq!(day1::_solve(&"1234".to_string(), &1), 0); } #[test] fn test_p1_4() { assert_eq!(day1::_solve(&"91212129".to_string(), &1), 9); } #[test] fn test_p2_1() { assert_eq!(day1::_solve(&"1212".to_string(), &2), 6); } #[test] fn test_p2_2() { assert_eq!(day1::_solve(&"1221".to_string(), &2), 0); } #[test] fn test_p2_3() { assert_eq!(day1::_solve(&"123425".to_string(), &3), 4); } #[test] fn test_p2_4() { assert_eq!(day1::_solve(&"12131415".to_string(), &4), 4); } }
}
random_line_split
mod.rs
pub fn solve(input: &String) -> String { let input = input.lines().nth(0).unwrap().to_string(); format!( "Skip 1: {} Skip n/2: {}", _solve(&input, &1), _solve(&input, &(input.len() / 2)) ) } fn _solve(input: &String, skip: &usize) -> usize { let nums: Vec<usize> = input.chars().map(|x| x as usize - '0' as usize).collect(); let mut sum = 0; let l = nums.len(); for (i, m) in nums.iter().enumerate() { let n = nums[(i + *skip as usize) % l]; if n == *m
} sum } #[cfg(test)] mod test { use day1; #[test] fn test_p1_1() { assert_eq!(day1::_solve(&"1122".to_string(), &1), 3); } #[test] fn test_p1_2() { assert_eq!(day1::_solve(&"1111".to_string(), &1), 4); } #[test] fn test_p1_3() { assert_eq!(day1::_solve(&"1234".to_string(), &1), 0); } #[test] fn test_p1_4() { assert_eq!(day1::_solve(&"91212129".to_string(), &1), 9); } #[test] fn test_p2_1() { assert_eq!(day1::_solve(&"1212".to_string(), &2), 6); } #[test] fn test_p2_2() { assert_eq!(day1::_solve(&"1221".to_string(), &2), 0); } #[test] fn test_p2_3() { assert_eq!(day1::_solve(&"123425".to_string(), &3), 4); } #[test] fn test_p2_4() { assert_eq!(day1::_solve(&"12131415".to_string(), &4), 4); } }
{ sum += nums[i]; }
conditional_block
mod.rs
pub fn solve(input: &String) -> String { let input = input.lines().nth(0).unwrap().to_string(); format!( "Skip 1: {} Skip n/2: {}", _solve(&input, &1), _solve(&input, &(input.len() / 2)) ) } fn _solve(input: &String, skip: &usize) -> usize { let nums: Vec<usize> = input.chars().map(|x| x as usize - '0' as usize).collect(); let mut sum = 0; let l = nums.len(); for (i, m) in nums.iter().enumerate() { let n = nums[(i + *skip as usize) % l]; if n == *m { sum += nums[i]; } } sum } #[cfg(test)] mod test { use day1; #[test] fn test_p1_1() { assert_eq!(day1::_solve(&"1122".to_string(), &1), 3); } #[test] fn test_p1_2() { assert_eq!(day1::_solve(&"1111".to_string(), &1), 4); } #[test] fn test_p1_3() { assert_eq!(day1::_solve(&"1234".to_string(), &1), 0); } #[test] fn test_p1_4()
#[test] fn test_p2_1() { assert_eq!(day1::_solve(&"1212".to_string(), &2), 6); } #[test] fn test_p2_2() { assert_eq!(day1::_solve(&"1221".to_string(), &2), 0); } #[test] fn test_p2_3() { assert_eq!(day1::_solve(&"123425".to_string(), &3), 4); } #[test] fn test_p2_4() { assert_eq!(day1::_solve(&"12131415".to_string(), &4), 4); } }
{ assert_eq!(day1::_solve(&"91212129".to_string(), &1), 9); }
identifier_body
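The digit parsing in `_solve` does raw ASCII arithmetic on each char. char::to_digit expresses the same conversion and returns None for non-digits instead of producing garbage values, for example:

    fn main() {
        let digits: Vec<u32> = "91212129".chars().filter_map(|c| c.to_digit(10)).collect();
        assert_eq!(digits, [9, 1, 2, 1, 2, 1, 2, 9]);
    }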
socket.rs
/* Rust doesn't have unix datagram socket support at the moment. Hope I can ditch * this in the future. * Thanks to @gcourier for his rust-syslog library which I used as a reference. */ use libc; use std::ffi::CString; use std::io::{Result, Error, ErrorKind}; use std::mem; use std::os::unix::io::RawFd; fn construct_sockaddr(path: &CString) -> Result<(libc::sockaddr_storage, usize)> { assert!(mem::size_of::<libc::sockaddr_storage>() >= mem::size_of::<libc::sockaddr_un>()); let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; let s: &mut libc::sockaddr_un = unsafe { mem::transmute(&mut storage) }; let len = path.as_bytes().len(); if len > s.sun_path.len() - 1 { let err = Error::new(ErrorKind::InvalidInput, "Socket path can not be longer than sizeof(sockaddr_storage) - 1"); return Err(err); } s.sun_family = libc::AF_UNIX as libc::sa_family_t; for (slot, value) in s.sun_path.iter_mut().zip(path.as_bytes().iter()) { *slot = *value as i8; } let len = mem::size_of::<libc::sa_family_t>() + len + 1; return Ok((storage, len)); } fn unix_socket(ty: libc::c_int) -> Result<RawFd> { match unsafe { libc::socket(libc::AF_UNIX, ty, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(fd) } } pub struct UnixSocket { fd: RawFd, } impl UnixSocket { pub fn new() -> Result<UnixSocket> { match unsafe { libc::socket(libc::AF_UNIX, libc::SOCK_DGRAM, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(UnixSocket{fd: fd}) } } pub fn sendto(&mut self, buf: &[u8], path: &CString) -> Result<()>
}
{ let (dst, len) = try!(construct_sockaddr(path)); let dstp = &dst as *const libc::sockaddr_storage; let ret = unsafe { libc::sendto(self.fd, buf.as_ptr() as *const libc::c_void, buf.len() as libc::size_t, 0x4000, // MSG_NOSIGNAL dstp as *const libc::sockaddr, len as libc::socklen_t) as libc::c_int }; match ret { -1 => Err(Error::last_os_error()), n if n as usize != buf.len() => Err(Error::new(ErrorKind::Other, "Could not send entire package")), _ => Ok(()) } }
identifier_body
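The apology at the top of socket.rs has since been answered: std now ships unix datagram sockets. A minimal sketch of the same sendto flow with std::os::unix::net (the path below is illustrative, and send_to errors unless something is bound there):

    use std::os::unix::net::UnixDatagram;

    fn main() -> std::io::Result<()> {
        let sock = UnixDatagram::unbound()?;            // like UnixSocket::new()
        sock.send_to(b"<13>hello", "/tmp/demo.sock")?;  // like sendto(buf, path)
        Ok(())
    }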
socket.rs
/* Rust doesn't have unix datagram socket support at the moment. Hope I can ditch * this in the future. * Thanks to @gcourier for his rust-syslog library which I used as a reference. */ use libc; use std::ffi::CString; use std::io::{Result, Error, ErrorKind}; use std::mem; use std::os::unix::io::RawFd; fn construct_sockaddr(path: &CString) -> Result<(libc::sockaddr_storage, usize)> { assert!(mem::size_of::<libc::sockaddr_storage>() >= mem::size_of::<libc::sockaddr_un>()); let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; let s: &mut libc::sockaddr_un = unsafe { mem::transmute(&mut storage) }; let len = path.as_bytes().len(); if len > s.sun_path.len() - 1
s.sun_family = libc::AF_UNIX as libc::sa_family_t; for (slot, value) in s.sun_path.iter_mut().zip(path.as_bytes().iter()) { *slot = *value as i8; } let len = mem::size_of::<libc::sa_family_t>() + len + 1; return Ok((storage, len)); } fn unix_socket(ty: libc::c_int) -> Result<RawFd> { match unsafe { libc::socket(libc::AF_UNIX, ty, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(fd) } } pub struct UnixSocket { fd: RawFd, } impl UnixSocket { pub fn new() -> Result<UnixSocket> { match unsafe { libc::socket(libc::AF_UNIX, libc::SOCK_DGRAM, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(UnixSocket{fd: fd}) } } pub fn sendto(&mut self, buf: &[u8], path: &CString) -> Result<()> { let (dst, len) = try!(construct_sockaddr(path)); let dstp = &dst as *const libc::sockaddr_storage; let ret = unsafe { libc::sendto(self.fd, buf.as_ptr() as *const libc::c_void, buf.len() as libc::size_t, 0x4000, // MSG_NOSIGNAL dstp as *const libc::sockaddr, len as libc::socklen_t) as libc::c_int }; match ret { -1 => Err(Error::last_os_error()), n if n as usize != buf.len() => Err(Error::new(ErrorKind::Other, "Could not send entire package")), _ => Ok(()) } } }
{ let err = Error::new(ErrorKind::InvalidInput, "Socket path can not be longer than sizeof(sockaddr_storage) - 1"); return Err(err); }
conditional_block
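The guard this row masks reserves one byte of sun_path for the trailing NUL. A standalone sketch of the same bound, assuming Linux's 108-byte sun_path (the size is platform-dependent):

    fn fits_in_sun_path(path: &[u8]) -> bool {
        const SUN_PATH_LEN: usize = 108; // Linux value; other platforms differ
        path.len() <= SUN_PATH_LEN - 1   // keep room for the NUL terminator
    }

    fn main() {
        assert!(fits_in_sun_path(b"/tmp/demo.sock"));
        assert!(!fits_in_sun_path(&[b'a'; 200]));
    }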
socket.rs
/* Rust doesn't have unix datagram socket support at the moment. Hope I can ditch * this in the future. * Thanks to @gcourier for his rust-syslog library which I used as a reference. */ use libc; use std::ffi::CString; use std::io::{Result, Error, ErrorKind}; use std::mem; use std::os::unix::io::RawFd; fn construct_sockaddr(path: &CString) -> Result<(libc::sockaddr_storage, usize)> { assert!(mem::size_of::<libc::sockaddr_storage>() >= mem::size_of::<libc::sockaddr_un>()); let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; let s: &mut libc::sockaddr_un = unsafe { mem::transmute(&mut storage) }; let len = path.as_bytes().len();
return Err(err); } s.sun_family = libc::AF_UNIX as libc::sa_family_t; for (slot, value) in s.sun_path.iter_mut().zip(path.as_bytes().iter()) { *slot = *value as i8; } let len = mem::size_of::<libc::sa_family_t>() + len + 1; return Ok((storage, len)); } fn unix_socket(ty: libc::c_int) -> Result<RawFd> { match unsafe { libc::socket(libc::AF_UNIX, ty, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(fd) } } pub struct UnixSocket { fd: RawFd, } impl UnixSocket { pub fn new() -> Result<UnixSocket> { match unsafe { libc::socket(libc::AF_UNIX, libc::SOCK_DGRAM, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(UnixSocket{fd: fd}) } } pub fn sendto(&mut self, buf: &[u8], path: &CString) -> Result<()> { let (dst, len) = try!(construct_sockaddr(path)); let dstp = &dst as *const libc::sockaddr_storage; let ret = unsafe { libc::sendto(self.fd, buf.as_ptr() as *const libc::c_void, buf.len() as libc::size_t, 0x4000, // MSG_NOSIGNAL dstp as *const libc::sockaddr, len as libc::socklen_t) as libc::c_int }; match ret { -1 => Err(Error::last_os_error()), n if n as usize != buf.len() => Err(Error::new(ErrorKind::Other, "Could not send entire package")), _ => Ok(()) } } }
if len > s.sun_path.len() - 1 { let err = Error::new(ErrorKind::InvalidInput, "Socket path can not be longer than sizeof(sockaddr_storage) - 1");
random_line_split
socket.rs
/* Rust doesn't have unix datagram socket support at the moment. Hope I can ditch * this in the future. * Thanks to @gcourier for his rust-syslog library which I used as a reference. */ use libc; use std::ffi::CString; use std::io::{Result, Error, ErrorKind}; use std::mem; use std::os::unix::io::RawFd; fn
(path: &CString) -> Result<(libc::sockaddr_storage, usize)> { assert!(mem::size_of::<libc::sockaddr_storage>() >= mem::size_of::<libc::sockaddr_un>()); let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; let s: &mut libc::sockaddr_un = unsafe { mem::transmute(&mut storage) }; let len = path.as_bytes().len(); if len > s.sun_path.len() - 1 { let err = Error::new(ErrorKind::InvalidInput, "Socket path can not be longer than sizeof(sockaddr_storage) - 1"); return Err(err); } s.sun_family = libc::AF_UNIX as libc::sa_family_t; for (slot, value) in s.sun_path.iter_mut().zip(path.as_bytes().iter()) { *slot = *value as i8; } let len = mem::size_of::<libc::sa_family_t>() + len + 1; return Ok((storage, len)); } fn unix_socket(ty: libc::c_int) -> Result<RawFd> { match unsafe { libc::socket(libc::AF_UNIX, ty, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(fd) } } pub struct UnixSocket { fd: RawFd, } impl UnixSocket { pub fn new() -> Result<UnixSocket> { match unsafe { libc::socket(libc::AF_UNIX, libc::SOCK_DGRAM, 0) } { -1 => Err(Error::last_os_error()), fd => Ok(UnixSocket{fd: fd}) } } pub fn sendto(&mut self, buf: &[u8], path: &CString) -> Result<()> { let (dst, len) = try!(construct_sockaddr(path)); let dstp = &dst as *const libc::sockaddr_storage; let ret = unsafe { libc::sendto(self.fd, buf.as_ptr() as *const libc::c_void, buf.len() as libc::size_t, 0x4000, // MSG_NOSIGNAL dstp as *const libc::sockaddr, len as libc::socklen_t) as libc::c_int }; match ret { -1 => Err(Error::last_os_error()), n if n as usize != buf.len() => Err(Error::new(ErrorKind::Other, "Could not send entire package")), _ => Ok(()) } } }
construct_sockaddr
identifier_name
mod.rs
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! aws ec2 metadata fetcher //! use std::collections::HashMap; #[cfg(test)] use mockito; use openssh_keys::PublicKey; use reqwest::header; use serde_derive::Deserialize; use slog_scope::warn; use crate::errors::*; use crate::providers::MetadataProvider; use crate::retry; #[cfg(test)] mod mock_tests; #[cfg(not(feature = "cl-legacy"))] static ENV_PREFIX: &str = "AWS"; #[cfg(feature = "cl-legacy")] static ENV_PREFIX: &str = "EC2"; #[allow(non_snake_case)] #[derive(Debug, Deserialize)] struct InstanceIdDoc { region: String, } #[derive(Clone, Debug)] pub struct AwsProvider { client: retry::Client, } impl AwsProvider { pub fn try_new() -> Result<AwsProvider> { let client = retry::Client::try_new()?.return_on_404(true); AwsProvider::with_client(client) } fn with_client(client: retry::Client) -> Result<AwsProvider> { let mut client = client; let token = AwsProvider::fetch_imdsv2_token(client.clone()); // If IMDSv2 token is fetched successfully, set the header. // Otherwise, proceed with IMDSv1 mechanism. match token { Ok(t) => { client = client.header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token") .chain_err(|| "setting header name for aws imdsv2 metadata")?, header::HeaderValue::from_bytes(t.as_bytes()) .chain_err(|| "setting header value for aws imdsv2 metadata")?, ); } Err(err) => { warn!("failed to fetch aws imdsv2 session token with: {}", err); } } Ok(AwsProvider { client }) } #[cfg(test)] fn endpoint_for(key: &str, _use_latest: bool) -> String { let url = mockito::server_url(); format!("{}/{}", url, key) } #[cfg(not(test))] fn endpoint_for(key: &str, use_latest: bool) -> String { const URL: &str = "http://169.254.169.254/2019-10-01"; const URL_LATEST: &str = "http://169.254.169.254/latest"; if use_latest { format!("{}/{}", URL_LATEST, key) } else { format!("{}/{}", URL, key) } } fn fetch_imdsv2_token(client: retry::Client) -> Result<String> { let token: String = client .header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token-ttl-seconds") .chain_err(|| "setting header name for aws imdsv2 token")?, header::HeaderValue::from_bytes(b"21600") .chain_err(|| "setting header value for aws imdsv2 token")?, ) .put( retry::Raw, // NOTE(zonggen): Use `latest` here since other versions would return "403 - Forbidden" AwsProvider::endpoint_for("api/token", true), None, ) .dispatch_put()? .chain_err(|| "unwrapping aws imdsv2 token")?; Ok(token) } fn fetch_ssh_keys(&self) -> Result<Vec<String>> { let keydata: Option<String> = self .client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/public-keys", false), ) .send()?; let mut keys = Vec::new(); if let Some(keys_list) = keydata { for l in keys_list.lines() { let tokens: Vec<&str> = l.split('=').collect(); if tokens.len() != 2
let key: String = self .client .get( retry::Raw, AwsProvider::endpoint_for( &format!("meta-data/public-keys/{}/openssh-key", tokens[0]), false, ), ) .send()? .ok_or("missing ssh key")?; keys.push(key) } } Ok(keys) } } impl MetadataProvider for AwsProvider { fn attributes(&self) -> Result<HashMap<String, String>> { let mut out = HashMap::with_capacity(6); let add_value = |map: &mut HashMap<_, _>, key: &str, name| -> Result<()> { let value = self .client .get(retry::Raw, AwsProvider::endpoint_for(name, false)) .send()?; if let Some(value) = value { map.insert(key.to_string(), value); } Ok(()) }; add_value( &mut out, &format!("{}_INSTANCE_ID", ENV_PREFIX), "meta-data/instance-id", )?; add_value( &mut out, &format!("{}_INSTANCE_TYPE", ENV_PREFIX), "meta-data/instance-type", )?; add_value( &mut out, &format!("{}_IPV4_LOCAL", ENV_PREFIX), "meta-data/local-ipv4", )?; add_value( &mut out, &format!("{}_IPV4_PUBLIC", ENV_PREFIX), "meta-data/public-ipv4", )?; add_value( &mut out, &format!("{}_AVAILABILITY_ZONE", ENV_PREFIX), "meta-data/placement/availability-zone", )?; add_value( &mut out, &format!("{}_HOSTNAME", ENV_PREFIX), "meta-data/hostname", )?; add_value( &mut out, &format!("{}_PUBLIC_HOSTNAME", ENV_PREFIX), "meta-data/public-hostname", )?; let region = self .client .get( retry::Json, AwsProvider::endpoint_for("dynamic/instance-identity/document", false), ) .send()? .map(|instance_id_doc: InstanceIdDoc| instance_id_doc.region); if let Some(region) = region { out.insert(format!("{}_REGION", ENV_PREFIX), region); } Ok(out) } fn hostname(&self) -> Result<Option<String>> { self.client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/hostname", false), ) .send() } fn ssh_keys(&self) -> Result<Vec<PublicKey>> { self.fetch_ssh_keys().map(|keys| { keys.into_iter() .map(|key| { let key = PublicKey::parse(&key)?; Ok(key) }) .collect::<Result<Vec<_>>>() })? } }
{ return Err("error parsing keyID".into()); }
conditional_block
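The masked guard rejects index lines that do not split into exactly two '='-separated tokens. str::split_once gives the same parse more directly, though note it tolerates '=' inside the key name, which the tokens.len() != 2 check does not:

    fn key_index(line: &str) -> Option<&str> {
        line.split_once('=').map(|(idx, _name)| idx)
    }

    fn main() {
        assert_eq!(key_index("0=my-key"), Some("0"));
        assert_eq!(key_index("garbage"), None);
    }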
mod.rs
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! aws ec2 metadata fetcher //! use std::collections::HashMap; #[cfg(test)] use mockito; use openssh_keys::PublicKey; use reqwest::header; use serde_derive::Deserialize; use slog_scope::warn; use crate::errors::*; use crate::providers::MetadataProvider; use crate::retry; #[cfg(test)] mod mock_tests; #[cfg(not(feature = "cl-legacy"))] static ENV_PREFIX: &str = "AWS"; #[cfg(feature = "cl-legacy")] static ENV_PREFIX: &str = "EC2"; #[allow(non_snake_case)] #[derive(Debug, Deserialize)] struct InstanceIdDoc { region: String, }
#[derive(Clone, Debug)] pub struct AwsProvider { client: retry::Client, } impl AwsProvider { pub fn try_new() -> Result<AwsProvider> { let client = retry::Client::try_new()?.return_on_404(true); AwsProvider::with_client(client) } fn with_client(client: retry::Client) -> Result<AwsProvider> { let mut client = client; let token = AwsProvider::fetch_imdsv2_token(client.clone()); // If IMDSv2 token is fetched successfully, set the header. // Otherwise, proceed with IMDSv1 mechanism. match token { Ok(t) => { client = client.header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token") .chain_err(|| "setting header name for aws imdsv2 metadata")?, header::HeaderValue::from_bytes(t.as_bytes()) .chain_err(|| "setting header value for aws imdsv2 metadata")?, ); } Err(err) => { warn!("failed to fetch aws imdsv2 session token with: {}", err); } } Ok(AwsProvider { client }) } #[cfg(test)] fn endpoint_for(key: &str, _use_latest: bool) -> String { let url = mockito::server_url(); format!("{}/{}", url, key) } #[cfg(not(test))] fn endpoint_for(key: &str, use_latest: bool) -> String { const URL: &str = "http://169.254.169.254/2019-10-01"; const URL_LATEST: &str = "http://169.254.169.254/latest"; if use_latest { format!("{}/{}", URL_LATEST, key) } else { format!("{}/{}", URL, key) } } fn fetch_imdsv2_token(client: retry::Client) -> Result<String> { let token: String = client .header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token-ttl-seconds") .chain_err(|| "setting header name for aws imdsv2 token")?, header::HeaderValue::from_bytes(b"21600") .chain_err(|| "setting header value for aws imdsv2 token")?, ) .put( retry::Raw, // NOTE(zonggen): Use `latest` here since other versions would return "403 - Forbidden" AwsProvider::endpoint_for("api/token", true), None, ) .dispatch_put()? .chain_err(|| "unwrapping aws imdsv2 token")?; Ok(token) } fn fetch_ssh_keys(&self) -> Result<Vec<String>> { let keydata: Option<String> = self .client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/public-keys", false), ) .send()?; let mut keys = Vec::new(); if let Some(keys_list) = keydata { for l in keys_list.lines() { let tokens: Vec<&str> = l.split('=').collect(); if tokens.len() != 2 { return Err("error parsing keyID".into()); } let key: String = self .client .get( retry::Raw, AwsProvider::endpoint_for( &format!("meta-data/public-keys/{}/openssh-key", tokens[0]), false, ), ) .send()?
.ok_or("missing ssh key")?; keys.push(key) } } Ok(keys) } } impl MetadataProvider for AwsProvider { fn attributes(&self) -> Result<HashMap<String, String>> { let mut out = HashMap::with_capacity(6); let add_value = |map: &mut HashMap<_, _>, key: &str, name| -> Result<()> { let value = self .client .get(retry::Raw, AwsProvider::endpoint_for(name, false)) .send()?; if let Some(value) = value { map.insert(key.to_string(), value); } Ok(()) }; add_value( &mut out, &format!("{}_INSTANCE_ID", ENV_PREFIX), "meta-data/instance-id", )?; add_value( &mut out, &format!("{}_INSTANCE_TYPE", ENV_PREFIX), "meta-data/instance-type", )?; add_value( &mut out, &format!("{}_IPV4_LOCAL", ENV_PREFIX), "meta-data/local-ipv4", )?; add_value( &mut out, &format!("{}_IPV4_PUBLIC", ENV_PREFIX), "meta-data/public-ipv4", )?; add_value( &mut out, &format!("{}_AVAILABILITY_ZONE", ENV_PREFIX), "meta-data/placement/availability-zone", )?; add_value( &mut out, &format!("{}_HOSTNAME", ENV_PREFIX), "meta-data/hostname", )?; add_value( &mut out, &format!("{}_PUBLIC_HOSTNAME", ENV_PREFIX), "meta-data/public-hostname", )?; let region = self .client .get( retry::Json, AwsProvider::endpoint_for("dynamic/instance-identity/document", false), ) .send()? .map(|instance_id_doc: InstanceIdDoc| instance_id_doc.region); if let Some(region) = region { out.insert(format!("{}_REGION", ENV_PREFIX), region); } Ok(out) } fn hostname(&self) -> Result<Option<String>> { self.client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/hostname", false), ) .send() } fn ssh_keys(&self) -> Result<Vec<PublicKey>> { self.fetch_ssh_keys().map(|keys| { keys.into_iter() .map(|key| { let key = PublicKey::parse(&key)?; Ok(key) }) .collect::<Result<Vec<_>>>() })? } }
random_line_split
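Editor's note on the AwsProvider record above: a short usage sketch may help readers of this dump. This is a minimal, hypothetical caller, assuming the record's module and its crate-local `Result` alias are in scope; only `try_new`, `attributes`, and `hostname` come from the record itself, the rest is illustrative.

```rust
use crate::providers::MetadataProvider;

// Hypothetical helper, not part of the record: prints EC2 metadata.
fn print_aws_metadata() -> Result<()> {
    // try_new() builds a retrying client and, when possible, attaches an
    // IMDSv2 session-token header; otherwise it falls back to IMDSv1.
    let provider = AwsProvider::try_new()?;

    // attributes() aggregates instance-id, instance-type, IPs, zone, etc.
    for (key, value) in provider.attributes()? {
        println!("{}={}", key, value);
    }

    // hostname() yields None when the endpoint returns a 404.
    if let Some(host) = provider.hostname()? {
        println!("hostname: {}", host);
    }
    Ok(())
}
```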
mod.rs
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! aws ec2 metadata fetcher //! use std::collections::HashMap; #[cfg(test)] use mockito; use openssh_keys::PublicKey; use reqwest::header; use serde_derive::Deserialize; use slog_scope::warn; use crate::errors::*; use crate::providers::MetadataProvider; use crate::retry; #[cfg(test)] mod mock_tests; #[cfg(not(feature = "cl-legacy"))] static ENV_PREFIX: &str = "AWS"; #[cfg(feature = "cl-legacy")] static ENV_PREFIX: &str = "EC2"; #[allow(non_snake_case)] #[derive(Debug, Deserialize)] struct InstanceIdDoc { region: String, } #[derive(Clone, Debug)] pub struct AwsProvider { client: retry::Client, } impl AwsProvider { pub fn try_new() -> Result<AwsProvider> { let client = retry::Client::try_new()?.return_on_404(true); AwsProvider::with_client(client) } fn with_client(client: retry::Client) -> Result<AwsProvider> { let mut client = client; let token = AwsProvider::fetch_imdsv2_token(client.clone()); // If IMDSv2 token is fetched successfully, set the header. // Otherwise, proceed with IMDSv1 mechanism. match token { Ok(t) => { client = client.header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token") .chain_err(|| "setting header name for aws imdsv2 metadata")?, header::HeaderValue::from_bytes(t.as_bytes()) .chain_err(|| "setting header value for aws imdsv2 metadata")?, ); } Err(err) => { warn!("failed to fetch aws imdsv2 session token with: {}", err); } } Ok(AwsProvider { client }) } #[cfg(test)] fn
(key: &str, _use_latest: bool) -> String { let url = mockito::server_url(); format!("{}/{}", url, key) } #[cfg(not(test))] fn endpoint_for(key: &str, use_latest: bool) -> String { const URL: &str = "http://169.254.169.254/2019-10-01"; const URL_LATEST: &str = "http://169.254.169.254/latest"; if use_latest { format!("{}/{}", URL_LATEST, key) } else { format!("{}/{}", URL, key) } } fn fetch_imdsv2_token(client: retry::Client) -> Result<String> { let token: String = client .header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token-ttl-seconds") .chain_err(|| "setting header name for aws imdsv2 token")?, header::HeaderValue::from_bytes(b"21600") .chain_err(|| "setting header value for aws imdsv2 token")?, ) .put( retry::Raw, // NOTE(zonggen): Use `latest` here since other versions would return "403 - Forbidden" AwsProvider::endpoint_for("api/token", true), None, ) .dispatch_put()? .chain_err(|| "unwrapping aws imdsv2 token")?; Ok(token) } fn fetch_ssh_keys(&self) -> Result<Vec<String>> { let keydata: Option<String> = self .client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/public-keys", false), ) .send()?; let mut keys = Vec::new(); if let Some(keys_list) = keydata { for l in keys_list.lines() { let tokens: Vec<&str> = l.split('=').collect(); if tokens.len() != 2 { return Err("error parsing keyID".into()); } let key: String = self .client .get( retry::Raw, AwsProvider::endpoint_for( &format!("meta-data/public-keys/{}/openssh-key", tokens[0]), false, ), ) .send()? .ok_or("missing ssh key")?; keys.push(key) } } Ok(keys) } } impl MetadataProvider for AwsProvider { fn attributes(&self) -> Result<HashMap<String, String>> { let mut out = HashMap::with_capacity(6); let add_value = |map: &mut HashMap<_, _>, key: &str, name| -> Result<()> { let value = self .client .get(retry::Raw, AwsProvider::endpoint_for(name, false)) .send()?; if let Some(value) = value { map.insert(key.to_string(), value); } Ok(()) }; add_value( &mut out, &format!("{}_INSTANCE_ID", ENV_PREFIX), "meta-data/instance-id", )?; add_value( &mut out, &format!("{}_INSTANCE_TYPE", ENV_PREFIX), "meta-data/instance-type", )?; add_value( &mut out, &format!("{}_IPV4_LOCAL", ENV_PREFIX), "meta-data/local-ipv4", )?; add_value( &mut out, &format!("{}_IPV4_PUBLIC", ENV_PREFIX), "meta-data/public-ipv4", )?; add_value( &mut out, &format!("{}_AVAILABILITY_ZONE", ENV_PREFIX), "meta-data/placement/availability-zone", )?; add_value( &mut out, &format!("{}_HOSTNAME", ENV_PREFIX), "meta-data/hostname", )?; add_value( &mut out, &format!("{}_PUBLIC_HOSTNAME", ENV_PREFIX), "meta-data/public-hostname", )?; let region = self .client .get( retry::Json, AwsProvider::endpoint_for("dynamic/instance-identity/document", false), ) .send()? .map(|instance_id_doc: InstanceIdDoc| instance_id_doc.region); if let Some(region) = region { out.insert(format!("{}_REGION", ENV_PREFIX), region); } Ok(out) } fn hostname(&self) -> Result<Option<String>> { self.client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/hostname", false), ) .send() } fn ssh_keys(&self) -> Result<Vec<PublicKey>> { self.fetch_ssh_keys().map(|keys| { keys.into_iter() .map(|key| { let key = PublicKey::parse(&key)?; Ok(key) }) .collect::<Result<Vec<_>>>() })? } }
endpoint_for
identifier_name
mod.rs
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! aws ec2 metadata fetcher //! use std::collections::HashMap; #[cfg(test)] use mockito; use openssh_keys::PublicKey; use reqwest::header; use serde_derive::Deserialize; use slog_scope::warn; use crate::errors::*; use crate::providers::MetadataProvider; use crate::retry; #[cfg(test)] mod mock_tests; #[cfg(not(feature = "cl-legacy"))] static ENV_PREFIX: &str = "AWS"; #[cfg(feature = "cl-legacy")] static ENV_PREFIX: &str = "EC2"; #[allow(non_snake_case)] #[derive(Debug, Deserialize)] struct InstanceIdDoc { region: String, } #[derive(Clone, Debug)] pub struct AwsProvider { client: retry::Client, } impl AwsProvider { pub fn try_new() -> Result<AwsProvider> { let client = retry::Client::try_new()?.return_on_404(true); AwsProvider::with_client(client) } fn with_client(client: retry::Client) -> Result<AwsProvider> { let mut client = client; let token = AwsProvider::fetch_imdsv2_token(client.clone()); // If IMDSv2 token is fetched successfully, set the header. // Otherwise, proceed with IMDSv1 mechanism. match token { Ok(t) => { client = client.header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token") .chain_err(|| "setting header name for aws imdsv2 metadata")?, header::HeaderValue::from_bytes(t.as_bytes()) .chain_err(|| "setting header value for aws imdsv2 metadata")?, ); } Err(err) => { warn!("failed to fetch aws imdsv2 session token with: {}", err); } } Ok(AwsProvider { client }) } #[cfg(test)] fn endpoint_for(key: &str, _use_latest: bool) -> String
#[cfg(not(test))] fn endpoint_for(key: &str, use_latest: bool) -> String { const URL: &str = "http://169.254.169.254/2019-10-01"; const URL_LATEST: &str = "http://169.254.169.254/latest"; if use_latest { format!("{}/{}", URL_LATEST, key) } else { format!("{}/{}", URL, key) } } fn fetch_imdsv2_token(client: retry::Client) -> Result<String> { let token: String = client .header( header::HeaderName::from_bytes(b"X-aws-ec2-metadata-token-ttl-seconds") .chain_err(|| "setting header name for aws imdsv2 token")?, header::HeaderValue::from_bytes(b"21600") .chain_err(|| "setting header value for aws imdsv2 token")?, ) .put( retry::Raw, // NOTE(zonggen): Use `latest` here since other versions would return "403 - Forbidden" AwsProvider::endpoint_for("api/token", true), None, ) .dispatch_put()? .chain_err(|| "unwrapping aws imdsv2 token")?; Ok(token) } fn fetch_ssh_keys(&self) -> Result<Vec<String>> { let keydata: Option<String> = self .client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/public-keys", false), ) .send()?; let mut keys = Vec::new(); if let Some(keys_list) = keydata { for l in keys_list.lines() { let tokens: Vec<&str> = l.split('=').collect(); if tokens.len() != 2 { return Err("error parsing keyID".into()); } let key: String = self .client .get( retry::Raw, AwsProvider::endpoint_for( &format!("meta-data/public-keys/{}/openssh-key", tokens[0]), false, ), ) .send()? .ok_or("missing ssh key")?; keys.push(key) } } Ok(keys) } } impl MetadataProvider for AwsProvider { fn attributes(&self) -> Result<HashMap<String, String>> { let mut out = HashMap::with_capacity(6); let add_value = |map: &mut HashMap<_, _>, key: &str, name| -> Result<()> { let value = self .client .get(retry::Raw, AwsProvider::endpoint_for(name, false)) .send()?; if let Some(value) = value { map.insert(key.to_string(), value); } Ok(()) }; add_value( &mut out, &format!("{}_INSTANCE_ID", ENV_PREFIX), "meta-data/instance-id", )?; add_value( &mut out, &format!("{}_INSTANCE_TYPE", ENV_PREFIX), "meta-data/instance-type", )?; add_value( &mut out, &format!("{}_IPV4_LOCAL", ENV_PREFIX), "meta-data/local-ipv4", )?; add_value( &mut out, &format!("{}_IPV4_PUBLIC", ENV_PREFIX), "meta-data/public-ipv4", )?; add_value( &mut out, &format!("{}_AVAILABILITY_ZONE", ENV_PREFIX), "meta-data/placement/availability-zone", )?; add_value( &mut out, &format!("{}_HOSTNAME", ENV_PREFIX), "meta-data/hostname", )?; add_value( &mut out, &format!("{}_PUBLIC_HOSTNAME", ENV_PREFIX), "meta-data/public-hostname", )?; let region = self .client .get( retry::Json, AwsProvider::endpoint_for("dynamic/instance-identity/document", false), ) .send()? .map(|instance_id_doc: InstanceIdDoc| instance_id_doc.region); if let Some(region) = region { out.insert(format!("{}_REGION", ENV_PREFIX), region); } Ok(out) } fn hostname(&self) -> Result<Option<String>> { self.client .get( retry::Raw, AwsProvider::endpoint_for("meta-data/hostname", false), ) .send() } fn ssh_keys(&self) -> Result<Vec<PublicKey>> { self.fetch_ssh_keys().map(|keys| { keys.into_iter() .map(|key| { let key = PublicKey::parse(&key)?; Ok(key) }) .collect::<Result<Vec<_>>>() })? } }
{ let url = mockito::server_url(); format!("{}/{}", url, key) }
identifier_body
mod.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::marker::PhantomData; use std::sync::Arc; use async_trait::async_trait; use edenapi::BlockingResponse; use edenapi::EdenApi; use edenapi::EdenApiError; use edenapi::Response; use edenapi_types::EdenApiServerError;
use types::Key; use crate::datastore::HgIdMutableDeltaStore; use crate::datastore::RemoteDataStore; use crate::historystore::HgIdMutableHistoryStore; use crate::historystore::RemoteHistoryStore; use crate::remotestore::HgIdRemoteStore; use crate::types::StoreKey; mod data; mod history; use data::EdenApiDataStore; use history::EdenApiHistoryStore; /// Convenience aliases for file and tree stores. pub type EdenApiFileStore = EdenApiRemoteStore<File>; pub type EdenApiTreeStore = EdenApiRemoteStore<Tree>; /// A shim around an EdenAPI client that implements the various traits of /// Mercurial's storage layer, allowing a type that implements `EdenApi` to be /// used alongside other Mercurial data and history stores. /// /// Note that this struct does not allow for data fetching on its own, because /// it does not contain a mutable store into which to write the fetched data. /// Use the methods from the `HgIdRemoteStore` trait to provide an appropriate /// mutable store. #[derive(Clone)] pub struct EdenApiRemoteStore<T> { client: Arc<dyn EdenApi>, _phantom: PhantomData<T>, } impl<T: EdenApiStoreKind> EdenApiRemoteStore<T> { /// Create a new EdenApiRemoteStore using the given EdenAPI client. /// /// The current design of the storage layer also requires a distinction /// between stores that provide file data and stores that provide tree data. /// (This is because both kinds of data are fetched via the `prefetch()` /// method from the `RemoteDataStore` trait.) /// /// The kind of data fetched by a store can be specified via a marker type; /// in particular, `File` or `Tree`. For example, a store that fetches file /// data would be created as follows: /// /// ```rust,ignore /// let store = EdenApiStore::<File>::new(edenapi); /// ``` pub fn new(client: Arc<dyn EdenApi>) -> Arc<Self> { Arc::new(Self { client, _phantom: PhantomData, }) } } impl HgIdRemoteStore for EdenApiRemoteStore<File> { fn datastore( self: Arc<Self>, store: Arc<dyn HgIdMutableDeltaStore>, ) -> Arc<dyn RemoteDataStore> { Arc::new(EdenApiDataStore::new(self, store)) } fn historystore( self: Arc<Self>, store: Arc<dyn HgIdMutableHistoryStore>, ) -> Arc<dyn RemoteHistoryStore> { Arc::new(EdenApiHistoryStore::new(self, store)) } } impl HgIdRemoteStore for EdenApiRemoteStore<Tree> { fn datastore( self: Arc<Self>, store: Arc<dyn HgIdMutableDeltaStore>, ) -> Arc<dyn RemoteDataStore> { Arc::new(EdenApiDataStore::new(self, store)) } fn historystore( self: Arc<Self>, _store: Arc<dyn HgIdMutableHistoryStore>, ) -> Arc<dyn RemoteHistoryStore> { unimplemented!("EdenAPI does not support fetching tree history") } } /// Marker type indicating that the store fetches file data. pub enum File {} /// Marker type indicating that the store fetches tree data. 
pub enum Tree {} impl EdenApiFileStore { pub fn files_blocking( &self, keys: Vec<Key>, ) -> Result<BlockingResponse<FileResponse>, EdenApiError> { BlockingResponse::from_async(self.client.files(keys)) } pub fn files_attrs_blocking( &self, reqs: Vec<FileSpec>, ) -> Result<BlockingResponse<FileResponse>, EdenApiError> { BlockingResponse::from_async(self.client.files_attrs(reqs)) } pub async fn files_attrs( &self, reqs: Vec<FileSpec>, ) -> Result<Response<FileResponse>, EdenApiError> { self.client.files_attrs(reqs).await } } impl EdenApiTreeStore { pub fn trees_blocking( &self, keys: Vec<Key>, attributes: Option<TreeAttributes>, ) -> Result<BlockingResponse<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { BlockingResponse::from_async(self.client.trees(keys, attributes)) } } /// Trait that provides a common interface for calling the `files` and `trees` /// methods on an EdenAPI client. #[async_trait] pub trait EdenApiStoreKind: Send + Sync + 'static { async fn prefetch_files( _client: Arc<dyn EdenApi>, _keys: Vec<Key>, ) -> Result<Response<FileResponse>, EdenApiError> { unimplemented!("fetching files not supported for this store") } async fn prefetch_trees( _client: Arc<dyn EdenApi>, _keys: Vec<Key>, _attributes: Option<TreeAttributes>, ) -> Result<Response<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { unimplemented!("fetching trees not supported for this store") } } #[async_trait] impl EdenApiStoreKind for File { async fn prefetch_files( client: Arc<dyn EdenApi>, keys: Vec<Key>, ) -> Result<Response<FileResponse>, EdenApiError> { client.files(keys).await } } #[async_trait] impl EdenApiStoreKind for Tree { async fn prefetch_trees( client: Arc<dyn EdenApi>, keys: Vec<Key>, attributes: Option<TreeAttributes>, ) -> Result<Response<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { client.trees(keys, attributes).await } } /// Return only the HgId keys from the given iterator. /// EdenAPI cannot fetch content-addressed LFS blobs. fn hgid_keys<'a>(keys: impl IntoIterator<Item = &'a StoreKey>) -> Vec<Key> { keys.into_iter() .filter_map(|k| match k { StoreKey::HgId(k) => Some(k.clone()), StoreKey::Content(..) => None, }) .collect() }
use edenapi_types::FileResponse; use edenapi_types::FileSpec; use edenapi_types::TreeAttributes; use edenapi_types::TreeEntry;
random_line_split
mod.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::marker::PhantomData; use std::sync::Arc; use async_trait::async_trait; use edenapi::BlockingResponse; use edenapi::EdenApi; use edenapi::EdenApiError; use edenapi::Response; use edenapi_types::EdenApiServerError; use edenapi_types::FileResponse; use edenapi_types::FileSpec; use edenapi_types::TreeAttributes; use edenapi_types::TreeEntry; use types::Key; use crate::datastore::HgIdMutableDeltaStore; use crate::datastore::RemoteDataStore; use crate::historystore::HgIdMutableHistoryStore; use crate::historystore::RemoteHistoryStore; use crate::remotestore::HgIdRemoteStore; use crate::types::StoreKey; mod data; mod history; use data::EdenApiDataStore; use history::EdenApiHistoryStore; /// Convenience aliases for file and tree stores. pub type EdenApiFileStore = EdenApiRemoteStore<File>; pub type EdenApiTreeStore = EdenApiRemoteStore<Tree>; /// A shim around an EdenAPI client that implements the various traits of /// Mercurial's storage layer, allowing a type that implements `EdenApi` to be /// used alongside other Mercurial data and history stores. /// /// Note that this struct does not allow for data fetching on its own, because /// it does not contain a mutable store into which to write the fetched data. /// Use the methods from the `HgIdRemoteStore` trait to provide an appropriate /// mutable store. #[derive(Clone)] pub struct EdenApiRemoteStore<T> { client: Arc<dyn EdenApi>, _phantom: PhantomData<T>, } impl<T: EdenApiStoreKind> EdenApiRemoteStore<T> { /// Create a new EdenApiRemoteStore using the given EdenAPI client. /// /// The current design of the storage layer also requires a distinction /// between stores that provide file data and stores that provide tree data. /// (This is because both kinds of data are fetched via the `prefetch()` /// method from the `RemoteDataStore` trait.) /// /// The kind of data fetched by a store can be specified via a marker type; /// in particular, `File` or `Tree`. For example, a store that fetches file /// data would be created as follows: /// /// ```rust,ignore /// let store = EdenApiStore::<File>::new(edenapi); /// ``` pub fn new(client: Arc<dyn EdenApi>) -> Arc<Self> { Arc::new(Self { client, _phantom: PhantomData, }) } } impl HgIdRemoteStore for EdenApiRemoteStore<File> { fn datastore( self: Arc<Self>, store: Arc<dyn HgIdMutableDeltaStore>, ) -> Arc<dyn RemoteDataStore> { Arc::new(EdenApiDataStore::new(self, store)) } fn historystore( self: Arc<Self>, store: Arc<dyn HgIdMutableHistoryStore>, ) -> Arc<dyn RemoteHistoryStore> { Arc::new(EdenApiHistoryStore::new(self, store)) } } impl HgIdRemoteStore for EdenApiRemoteStore<Tree> { fn datastore( self: Arc<Self>, store: Arc<dyn HgIdMutableDeltaStore>, ) -> Arc<dyn RemoteDataStore>
fn historystore( self: Arc<Self>, _store: Arc<dyn HgIdMutableHistoryStore>, ) -> Arc<dyn RemoteHistoryStore> { unimplemented!("EdenAPI does not support fetching tree history") } } /// Marker type indicating that the store fetches file data. pub enum File {} /// Marker type indicating that the store fetches tree data. pub enum Tree {} impl EdenApiFileStore { pub fn files_blocking( &self, keys: Vec<Key>, ) -> Result<BlockingResponse<FileResponse>, EdenApiError> { BlockingResponse::from_async(self.client.files(keys)) } pub fn files_attrs_blocking( &self, reqs: Vec<FileSpec>, ) -> Result<BlockingResponse<FileResponse>, EdenApiError> { BlockingResponse::from_async(self.client.files_attrs(reqs)) } pub async fn files_attrs( &self, reqs: Vec<FileSpec>, ) -> Result<Response<FileResponse>, EdenApiError> { self.client.files_attrs(reqs).await } } impl EdenApiTreeStore { pub fn trees_blocking( &self, keys: Vec<Key>, attributes: Option<TreeAttributes>, ) -> Result<BlockingResponse<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { BlockingResponse::from_async(self.client.trees(keys, attributes)) } } /// Trait that provides a common interface for calling the `files` and `trees` /// methods on an EdenAPI client. #[async_trait] pub trait EdenApiStoreKind: Send + Sync + 'static { async fn prefetch_files( _client: Arc<dyn EdenApi>, _keys: Vec<Key>, ) -> Result<Response<FileResponse>, EdenApiError> { unimplemented!("fetching files not supported for this store") } async fn prefetch_trees( _client: Arc<dyn EdenApi>, _keys: Vec<Key>, _attributes: Option<TreeAttributes>, ) -> Result<Response<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { unimplemented!("fetching trees not supported for this store") } } #[async_trait] impl EdenApiStoreKind for File { async fn prefetch_files( client: Arc<dyn EdenApi>, keys: Vec<Key>, ) -> Result<Response<FileResponse>, EdenApiError> { client.files(keys).await } } #[async_trait] impl EdenApiStoreKind for Tree { async fn prefetch_trees( client: Arc<dyn EdenApi>, keys: Vec<Key>, attributes: Option<TreeAttributes>, ) -> Result<Response<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { client.trees(keys, attributes).await } } /// Return only the HgId keys from the given iterator. /// EdenAPI cannot fetch content-addressed LFS blobs. fn hgid_keys<'a>(keys: impl IntoIterator<Item = &'a StoreKey>) -> Vec<Key> { keys.into_iter() .filter_map(|k| match k { StoreKey::HgId(k) => Some(k.clone()), StoreKey::Content(..) => None, }) .collect() }
{ Arc::new(EdenApiDataStore::new(self, store)) }
identifier_body
mod.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use std::marker::PhantomData; use std::sync::Arc; use async_trait::async_trait; use edenapi::BlockingResponse; use edenapi::EdenApi; use edenapi::EdenApiError; use edenapi::Response; use edenapi_types::EdenApiServerError; use edenapi_types::FileResponse; use edenapi_types::FileSpec; use edenapi_types::TreeAttributes; use edenapi_types::TreeEntry; use types::Key; use crate::datastore::HgIdMutableDeltaStore; use crate::datastore::RemoteDataStore; use crate::historystore::HgIdMutableHistoryStore; use crate::historystore::RemoteHistoryStore; use crate::remotestore::HgIdRemoteStore; use crate::types::StoreKey; mod data; mod history; use data::EdenApiDataStore; use history::EdenApiHistoryStore; /// Convenience aliases for file and tree stores. pub type EdenApiFileStore = EdenApiRemoteStore<File>; pub type EdenApiTreeStore = EdenApiRemoteStore<Tree>; /// A shim around an EdenAPI client that implements the various traits of /// Mercurial's storage layer, allowing a type that implements `EdenApi` to be /// used alongside other Mercurial data and history stores. /// /// Note that this struct does not allow for data fetching on its own, because /// it does not contain a mutable store into which to write the fetched data. /// Use the methods from the `HgIdRemoteStore` trait to provide an appropriate /// mutable store. #[derive(Clone)] pub struct EdenApiRemoteStore<T> { client: Arc<dyn EdenApi>, _phantom: PhantomData<T>, } impl<T: EdenApiStoreKind> EdenApiRemoteStore<T> { /// Create a new EdenApiRemoteStore using the given EdenAPI client. /// /// The current design of the storage layer also requires a distinction /// between stores that provide file data and stores that provide tree data. /// (This is because both kinds of data are fetched via the `prefetch()` /// method from the `RemoteDataStore` trait.) /// /// The kind of data fetched by a store can be specified via a marker type; /// in particular, `File` or `Tree`. For example, a store that fetches file /// data would be created as follows: /// /// ```rust,ignore /// let store = EdenApiStore::<File>::new(edenapi); /// ``` pub fn
(client: Arc<dyn EdenApi>) -> Arc<Self> { Arc::new(Self { client, _phantom: PhantomData, }) } } impl HgIdRemoteStore for EdenApiRemoteStore<File> { fn datastore( self: Arc<Self>, store: Arc<dyn HgIdMutableDeltaStore>, ) -> Arc<dyn RemoteDataStore> { Arc::new(EdenApiDataStore::new(self, store)) } fn historystore( self: Arc<Self>, store: Arc<dyn HgIdMutableHistoryStore>, ) -> Arc<dyn RemoteHistoryStore> { Arc::new(EdenApiHistoryStore::new(self, store)) } } impl HgIdRemoteStore for EdenApiRemoteStore<Tree> { fn datastore( self: Arc<Self>, store: Arc<dyn HgIdMutableDeltaStore>, ) -> Arc<dyn RemoteDataStore> { Arc::new(EdenApiDataStore::new(self, store)) } fn historystore( self: Arc<Self>, _store: Arc<dyn HgIdMutableHistoryStore>, ) -> Arc<dyn RemoteHistoryStore> { unimplemented!("EdenAPI does not support fetching tree history") } } /// Marker type indicating that the store fetches file data. pub enum File {} /// Marker type indicating that the store fetches tree data. pub enum Tree {} impl EdenApiFileStore { pub fn files_blocking( &self, keys: Vec<Key>, ) -> Result<BlockingResponse<FileResponse>, EdenApiError> { BlockingResponse::from_async(self.client.files(keys)) } pub fn files_attrs_blocking( &self, reqs: Vec<FileSpec>, ) -> Result<BlockingResponse<FileResponse>, EdenApiError> { BlockingResponse::from_async(self.client.files_attrs(reqs)) } pub async fn files_attrs( &self, reqs: Vec<FileSpec>, ) -> Result<Response<FileResponse>, EdenApiError> { self.client.files_attrs(reqs).await } } impl EdenApiTreeStore { pub fn trees_blocking( &self, keys: Vec<Key>, attributes: Option<TreeAttributes>, ) -> Result<BlockingResponse<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { BlockingResponse::from_async(self.client.trees(keys, attributes)) } } /// Trait that provides a common interface for calling the `files` and `trees` /// methods on an EdenAPI client. #[async_trait] pub trait EdenApiStoreKind: Send + Sync + 'static { async fn prefetch_files( _client: Arc<dyn EdenApi>, _keys: Vec<Key>, ) -> Result<Response<FileResponse>, EdenApiError> { unimplemented!("fetching files not supported for this store") } async fn prefetch_trees( _client: Arc<dyn EdenApi>, _keys: Vec<Key>, _attributes: Option<TreeAttributes>, ) -> Result<Response<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { unimplemented!("fetching trees not supported for this store") } } #[async_trait] impl EdenApiStoreKind for File { async fn prefetch_files( client: Arc<dyn EdenApi>, keys: Vec<Key>, ) -> Result<Response<FileResponse>, EdenApiError> { client.files(keys).await } } #[async_trait] impl EdenApiStoreKind for Tree { async fn prefetch_trees( client: Arc<dyn EdenApi>, keys: Vec<Key>, attributes: Option<TreeAttributes>, ) -> Result<Response<Result<TreeEntry, EdenApiServerError>>, EdenApiError> { client.trees(keys, attributes).await } } /// Return only the HgId keys from the given iterator. /// EdenAPI cannot fetch content-addressed LFS blobs. fn hgid_keys<'a>(keys: impl IntoIterator<Item = &'a StoreKey>) -> Vec<Key> { keys.into_iter() .filter_map(|k| match k { StoreKey::HgId(k) => Some(k.clone()), StoreKey::Content(..) => None, }) .collect() }
new
identifier_name
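Editor's note on the EdenApiRemoteStore records above: the `File`/`Tree` markers select capabilities at compile time through `PhantomData`. A self-contained sketch of that pattern follows; every name in it is illustrative and not part of the EdenAPI crate.

```rust
use std::marker::PhantomData;

// Zero-sized markers selecting a store "kind" at compile time.
enum FileKind {}
enum TreeKind {}

struct Store<K> {
    endpoint: String,
    _kind: PhantomData<K>, // carries K without storing a value
}

impl<K> Store<K> {
    fn new(endpoint: impl Into<String>) -> Self {
        Store { endpoint: endpoint.into(), _kind: PhantomData }
    }
}

// Capabilities are attached per marker, so a Store<TreeKind>
// simply has no `fetch_file` method to misuse.
impl Store<FileKind> {
    fn fetch_file(&self, path: &str) -> String {
        format!("GET {}/files/{}", self.endpoint, path)
    }
}

impl Store<TreeKind> {
    fn fetch_tree(&self, id: u64) -> String {
        format!("GET {}/trees/{}", self.endpoint, id)
    }
}

fn main() {
    let files: Store<FileKind> = Store::new("https://example.invalid");
    let trees: Store<TreeKind> = Store::new("https://example.invalid");
    println!("{}", files.fetch_file("README.md"));
    println!("{}", trees.fetch_tree(42));
}
```

The payoff is the same as in the record: calling a tree-only operation on a file store is a compile error rather than a runtime `unimplemented!`.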
coherence-overlap-issue-23516-inherent.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Tests that we consider `Box<U>: !Sugar` to be ambiguous, even // though we see no impl of `Sugar` for `Box`. Therefore, an overlap // error is reported for the following pair of impls (#23516). pub trait Sugar {} struct Cake<X>(X); impl<T:Sugar> Cake<T> { fn dummy(&self) { } } //~^ ERROR E0592 impl<U:Sugar> Cake<Box<U>> { fn
(&self) { } } fn main() { }
dummy
identifier_name
coherence-overlap-issue-23516-inherent.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Tests that we consider `Box<U>: !Sugar` to be ambiguous, even
struct Cake<X>(X); impl<T:Sugar> Cake<T> { fn dummy(&self) { } } //~^ ERROR E0592 impl<U:Sugar> Cake<Box<U>> { fn dummy(&self) { } } fn main() { }
// though we see no impl of `Sugar` for `Box`. Therefore, an overlap // error is reported for the following pair of impls (#23516). pub trait Sugar {}
random_line_split
read_pool.rs
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. //! Distinct thread pools to handle read commands having different priority levels. use crate::config::StorageReadPoolConfig; use crate::storage::kv::{destroy_tls_engine, set_tls_engine, Engine, FlowStatsReporter}; use crate::storage::metrics; use file_system::{set_io_type, IOType}; use std::sync::{Arc, Mutex}; use tikv_util::yatp_pool::{Config, DefaultTicker, FuturePool, PoolTicker, YatpPoolBuilder}; #[derive(Clone)] struct FuturePoolTicker<R: FlowStatsReporter> { pub reporter: R, } impl<R: FlowStatsReporter> PoolTicker for FuturePoolTicker<R> { fn on_tick(&mut self) { metrics::tls_flush(&self.reporter); } } /// Build respective thread pools to handle read commands of different priority levels. pub fn build_read_pool<E: Engine, R: FlowStatsReporter>( config: &StorageReadPoolConfig, reporter: R, engine: E, ) -> Vec<FuturePool>
destroy_tls_engine::<E>(); }) .build_future_pool() }) .collect() } /// Build a thread pool that has default tick behavior for testing. pub fn build_read_pool_for_test<E: Engine>( config: &StorageReadPoolConfig, engine: E, ) -> Vec<FuturePool> { let names = vec!["store-read-low", "store-read-normal", "store-read-high"]; let configs: Vec<Config> = config.to_yatp_pool_configs(); assert_eq!(configs.len(), 3); configs .into_iter() .zip(names) .map(|(config, name)| { let engine = Arc::new(Mutex::new(engine.clone())); YatpPoolBuilder::new(DefaultTicker::default()) .config(config) .name_prefix(name) .after_start(move || { set_tls_engine(engine.lock().unwrap().clone()); set_io_type(IOType::ForegroundRead); }) // Safety: we call `set_` and `destroy_` with the same engine type. .before_stop(|| unsafe { destroy_tls_engine::<E>() }) .build_future_pool() }) .collect() }
{ let names = vec!["store-read-low", "store-read-normal", "store-read-high"]; let configs: Vec<Config> = config.to_yatp_pool_configs(); assert_eq!(configs.len(), 3); configs .into_iter() .zip(names) .map(|(config, name)| { let reporter = reporter.clone(); let engine = Arc::new(Mutex::new(engine.clone())); YatpPoolBuilder::new(FuturePoolTicker { reporter }) .name_prefix(name) .config(config) .after_start(move || { set_tls_engine(engine.lock().unwrap().clone()); set_io_type(IOType::ForegroundRead); }) .before_stop(move || unsafe { // Safety: we call `set_` and `destroy_` with the same engine type.
identifier_body
read_pool.rs
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. //! Distinct thread pools to handle read commands having different priority levels. use crate::config::StorageReadPoolConfig; use crate::storage::kv::{destroy_tls_engine, set_tls_engine, Engine, FlowStatsReporter}; use crate::storage::metrics; use file_system::{set_io_type, IOType}; use std::sync::{Arc, Mutex}; use tikv_util::yatp_pool::{Config, DefaultTicker, FuturePool, PoolTicker, YatpPoolBuilder}; #[derive(Clone)] struct FuturePoolTicker<R: FlowStatsReporter> { pub reporter: R, } impl<R: FlowStatsReporter> PoolTicker for FuturePoolTicker<R> { fn on_tick(&mut self) { metrics::tls_flush(&self.reporter); } } /// Build respective thread pools to handle read commands of different priority levels. pub fn build_read_pool<E: Engine, R: FlowStatsReporter>( config: &StorageReadPoolConfig, reporter: R,
configs .into_iter() .zip(names) .map(|(config, name)| { let reporter = reporter.clone(); let engine = Arc::new(Mutex::new(engine.clone())); YatpPoolBuilder::new(FuturePoolTicker { reporter }) .name_prefix(name) .config(config) .after_start(move || { set_tls_engine(engine.lock().unwrap().clone()); set_io_type(IOType::ForegroundRead); }) .before_stop(move || unsafe { // Safety: we call `set_` and `destroy_` with the same engine type. destroy_tls_engine::<E>(); }) .build_future_pool() }) .collect() } /// Build a thread pool that has default tick behavior for testing. pub fn build_read_pool_for_test<E: Engine>( config: &StorageReadPoolConfig, engine: E, ) -> Vec<FuturePool> { let names = vec!["store-read-low", "store-read-normal", "store-read-high"]; let configs: Vec<Config> = config.to_yatp_pool_configs(); assert_eq!(configs.len(), 3); configs .into_iter() .zip(names) .map(|(config, name)| { let engine = Arc::new(Mutex::new(engine.clone())); YatpPoolBuilder::new(DefaultTicker::default()) .config(config) .name_prefix(name) .after_start(move || { set_tls_engine(engine.lock().unwrap().clone()); set_io_type(IOType::ForegroundRead); }) // Safety: we call `set_` and `destroy_` with the same engine type. .before_stop(|| unsafe { destroy_tls_engine::<E>() }) .build_future_pool() }) .collect() }
engine: E, ) -> Vec<FuturePool> { let names = vec!["store-read-low", "store-read-normal", "store-read-high"]; let configs: Vec<Config> = config.to_yatp_pool_configs(); assert_eq!(configs.len(), 3);
random_line_split
read_pool.rs
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. //! Distinct thread pools to handle read commands having different priority levels. use crate::config::StorageReadPoolConfig; use crate::storage::kv::{destroy_tls_engine, set_tls_engine, Engine, FlowStatsReporter}; use crate::storage::metrics; use file_system::{set_io_type, IOType}; use std::sync::{Arc, Mutex}; use tikv_util::yatp_pool::{Config, DefaultTicker, FuturePool, PoolTicker, YatpPoolBuilder}; #[derive(Clone)] struct FuturePoolTicker<R: FlowStatsReporter> { pub reporter: R, } impl<R: FlowStatsReporter> PoolTicker for FuturePoolTicker<R> { fn on_tick(&mut self) { metrics::tls_flush(&self.reporter); } } /// Build respective thread pools to handle read commands of different priority levels. pub fn
<E: Engine, R: FlowStatsReporter>( config: &StorageReadPoolConfig, reporter: R, engine: E, ) -> Vec<FuturePool> { let names = vec!["store-read-low", "store-read-normal", "store-read-high"]; let configs: Vec<Config> = config.to_yatp_pool_configs(); assert_eq!(configs.len(), 3); configs .into_iter() .zip(names) .map(|(config, name)| { let reporter = reporter.clone(); let engine = Arc::new(Mutex::new(engine.clone())); YatpPoolBuilder::new(FuturePoolTicker { reporter }) .name_prefix(name) .config(config) .after_start(move || { set_tls_engine(engine.lock().unwrap().clone()); set_io_type(IOType::ForegroundRead); }) .before_stop(move || unsafe { // Safety: we call `set_` and `destroy_` with the same engine type. destroy_tls_engine::<E>(); }) .build_future_pool() }) .collect() } /// Build a thread pool that has default tick behavior for testing. pub fn build_read_pool_for_test<E: Engine>( config: &StorageReadPoolConfig, engine: E, ) -> Vec<FuturePool> { let names = vec!["store-read-low", "store-read-normal", "store-read-high"]; let configs: Vec<Config> = config.to_yatp_pool_configs(); assert_eq!(configs.len(), 3); configs .into_iter() .zip(names) .map(|(config, name)| { let engine = Arc::new(Mutex::new(engine.clone())); YatpPoolBuilder::new(DefaultTicker::default()) .config(config) .name_prefix(name) .after_start(move || { set_tls_engine(engine.lock().unwrap().clone()); set_io_type(IOType::ForegroundRead); }) // Safety: we call `set_` and `destroy_` with the same engine type. .before_stop(|| unsafe { destroy_tls_engine::<E>() }) .build_future_pool() }) .collect() }
build_read_pool
identifier_name
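Editor's note on the read_pool records above: both builders wrap the engine in `Arc<Mutex<_>>` so each pool's `after_start` closure can take its own clone on every worker thread. A dependency-free sketch of that sharing pattern follows; the engine and pool names here are stand-ins, not TiKV's types.

```rust
use std::sync::{Arc, Mutex};
use std::thread;

#[derive(Clone)]
struct Engine {
    name: String,
}

fn main() {
    let engine = Engine { name: "kv".to_string() };
    let shared = Arc::new(Mutex::new(engine));

    let handles: Vec<_> = ["low", "normal", "high"]
        .into_iter()
        .map(|pool_name| {
            // Each "pool" gets its own Arc handle; the Mutex guards the
            // one shared template engine while threads clone from it.
            let shared = Arc::clone(&shared);
            thread::spawn(move || {
                let local = shared.lock().unwrap().clone();
                println!("{}-pool got engine {}", pool_name, local.name);
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }
}
```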
lib.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ #[macro_use] extern crate malloc_size_of_derive; use app_units::{Au, MAX_AU, MIN_AU}; use euclid::{ default::{Point2D, Rect, Size2D}, Length, }; use std::f32; use webrender_api::units::{FramebufferPixel, LayoutPoint, LayoutRect, LayoutSize}; // Units for use with euclid::length and euclid::scale_factor. pub type FramebufferUintLength = Length<u32, FramebufferPixel>; /// A normalized "pixel" at the default resolution for the display. /// /// Like the CSS "px" unit, the exact physical size of this unit may vary between devices, but it /// should approximate a device-independent reference length. This unit corresponds to Android's /// "density-independent pixel" (dip), Mac OS X's "point", and Windows "device-independent pixel." /// /// The relationship between DevicePixel and DeviceIndependentPixel is defined by the OS. On most low-dpi /// screens, one DeviceIndependentPixel is equal to one DevicePixel. But on high-density screens it can be /// some larger number. For example, by default on Apple "retina" displays, one DeviceIndependentPixel equals /// two DevicePixels. On Android "MDPI" displays, one DeviceIndependentPixel equals 1.5 device pixels. /// /// The ratio between DeviceIndependentPixel and DevicePixel for a given display can be found by calling /// `servo::windowing::WindowMethods::hidpi_factor`. #[derive(Clone, Copy, Debug, MallocSizeOf)] pub enum DeviceIndependentPixel {} // An Au is an "App Unit" and represents 1/60th of a CSS pixel. It was // originally proposed in 2002 as a standard unit of measure in Gecko. // See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info. pub trait MaxRect { #[inline(always)] fn max_rect() -> Self; } impl MaxRect for Rect<Au> { fn
() -> Rect<Au> { Rect::new( Point2D::new(MIN_AU / 2, MIN_AU / 2), Size2D::new(MAX_AU, MAX_AU), ) } } impl MaxRect for LayoutRect { fn max_rect() -> LayoutRect { LayoutRect::new( LayoutPoint::new(f32::MIN / 2.0, f32::MIN / 2.0), LayoutSize::new(f32::MAX, f32::MAX), ) } } /// A helper function to convert a rect of `f32` pixels to a rect of app units. pub fn f32_rect_to_au_rect(rect: Rect<f32>) -> Rect<Au> { Rect::new( Point2D::new( Au::from_f32_px(rect.origin.x), Au::from_f32_px(rect.origin.y), ), Size2D::new( Au::from_f32_px(rect.size.width), Au::from_f32_px(rect.size.height), ), ) } /// A helper function to convert a rect of `Au` pixels to a rect of f32 units. pub fn au_rect_to_f32_rect(rect: Rect<Au>) -> Rect<f32> { Rect::new( Point2D::new(rect.origin.x.to_f32_px(), rect.origin.y.to_f32_px()), Size2D::new(rect.size.width.to_f32_px(), rect.size.height.to_f32_px()), ) }
max_rect
identifier_name
lib.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ #[macro_use] extern crate malloc_size_of_derive; use app_units::{Au, MAX_AU, MIN_AU}; use euclid::{ default::{Point2D, Rect, Size2D}, Length, }; use std::f32; use webrender_api::units::{FramebufferPixel, LayoutPoint, LayoutRect, LayoutSize}; // Units for use with euclid::length and euclid::scale_factor. pub type FramebufferUintLength = Length<u32, FramebufferPixel>; /// A normalized "pixel" at the default resolution for the display. /// /// Like the CSS "px" unit, the exact physical size of this unit may vary between devices, but it /// should approximate a device-independent reference length. This unit corresponds to Android's /// "density-independent pixel" (dip), Mac OS X's "point", and Windows "device-independent pixel." /// /// The relationship between DevicePixel and DeviceIndependentPixel is defined by the OS. On most low-dpi /// screens, one DeviceIndependentPixel is equal to one DevicePixel. But on high-density screens it can be /// some larger number. For example, by default on Apple "retina" displays, one DeviceIndependentPixel equals /// two DevicePixels. On Android "MDPI" displays, one DeviceIndependentPixel equals 1.5 device pixels. /// /// The ratio between DeviceIndependentPixel and DevicePixel for a given display can be found by calling /// `servo::windowing::WindowMethods::hidpi_factor`. #[derive(Clone, Copy, Debug, MallocSizeOf)] pub enum DeviceIndependentPixel {} // An Au is an "App Unit" and represents 1/60th of a CSS pixel. It was // originally proposed in 2002 as a standard unit of measure in Gecko. // See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info. pub trait MaxRect { #[inline(always)] fn max_rect() -> Self; } impl MaxRect for Rect<Au> { fn max_rect() -> Rect<Au> { Rect::new( Point2D::new(MIN_AU / 2, MIN_AU / 2), Size2D::new(MAX_AU, MAX_AU), ) } } impl MaxRect for LayoutRect { fn max_rect() -> LayoutRect {
LayoutSize::new(f32::MAX, f32::MAX), ) } } /// A helper function to convert a rect of `f32` pixels to a rect of app units. pub fn f32_rect_to_au_rect(rect: Rect<f32>) -> Rect<Au> { Rect::new( Point2D::new( Au::from_f32_px(rect.origin.x), Au::from_f32_px(rect.origin.y), ), Size2D::new( Au::from_f32_px(rect.size.width), Au::from_f32_px(rect.size.height), ), ) } /// A helper function to convert a rect of `Au` pixels to a rect of f32 units. pub fn au_rect_to_f32_rect(rect: Rect<Au>) -> Rect<f32> { Rect::new( Point2D::new(rect.origin.x.to_f32_px(), rect.origin.y.to_f32_px()), Size2D::new(rect.size.width.to_f32_px(), rect.size.height.to_f32_px()), ) }
LayoutRect::new( LayoutPoint::new(f32::MIN / 2.0, f32::MIN / 2.0),
random_line_split
lib.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ #[macro_use] extern crate malloc_size_of_derive; use app_units::{Au, MAX_AU, MIN_AU}; use euclid::{ default::{Point2D, Rect, Size2D}, Length, }; use std::f32; use webrender_api::units::{FramebufferPixel, LayoutPoint, LayoutRect, LayoutSize}; // Units for use with euclid::length and euclid::scale_factor. pub type FramebufferUintLength = Length<u32, FramebufferPixel>; /// A normalized "pixel" at the default resolution for the display. /// /// Like the CSS "px" unit, the exact physical size of this unit may vary between devices, but it /// should approximate a device-independent reference length. This unit corresponds to Android's /// "density-independent pixel" (dip), Mac OS X's "point", and Windows "device-independent pixel." /// /// The relationship between DevicePixel and DeviceIndependentPixel is defined by the OS. On most low-dpi /// screens, one DeviceIndependentPixel is equal to one DevicePixel. But on high-density screens it can be /// some larger number. For example, by default on Apple "retina" displays, one DeviceIndependentPixel equals /// two DevicePixels. On Android "MDPI" displays, one DeviceIndependentPixel equals 1.5 device pixels. /// /// The ratio between DeviceIndependentPixel and DevicePixel for a given display can be found by calling /// `servo::windowing::WindowMethods::hidpi_factor`. #[derive(Clone, Copy, Debug, MallocSizeOf)] pub enum DeviceIndependentPixel {} // An Au is an "App Unit" and represents 1/60th of a CSS pixel. It was // originally proposed in 2002 as a standard unit of measure in Gecko. // See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info. pub trait MaxRect { #[inline(always)] fn max_rect() -> Self; } impl MaxRect for Rect<Au> { fn max_rect() -> Rect<Au>
} impl MaxRect for LayoutRect { fn max_rect() -> LayoutRect { LayoutRect::new( LayoutPoint::new(f32::MIN / 2.0, f32::MIN / 2.0), LayoutSize::new(f32::MAX, f32::MAX), ) } } /// A helper function to convert a rect of `f32` pixels to a rect of app units. pub fn f32_rect_to_au_rect(rect: Rect<f32>) -> Rect<Au> { Rect::new( Point2D::new( Au::from_f32_px(rect.origin.x), Au::from_f32_px(rect.origin.y), ), Size2D::new( Au::from_f32_px(rect.size.width), Au::from_f32_px(rect.size.height), ), ) } /// A helper function to convert a rect of `Au` pixels to a rect of f32 units. pub fn au_rect_to_f32_rect(rect: Rect<Au>) -> Rect<f32> { Rect::new( Point2D::new(rect.origin.x.to_f32_px(), rect.origin.y.to_f32_px()), Size2D::new(rect.size.width.to_f32_px(), rect.size.height.to_f32_px()), ) }
{ Rect::new( Point2D::new(MIN_AU / 2, MIN_AU / 2), Size2D::new(MAX_AU, MAX_AU), ) }
identifier_body
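Editor's note on the lib.rs records above: as their comment says, an Au is 1/60th of a CSS pixel, so the px/Au conversions reduce to a scale by 60. A dependency-free sketch of that arithmetic follows; app_units itself rounds similarly, but treat the exact rounding mode here as an assumption.

```rust
const AU_PER_PX: f32 = 60.0;

// f32 pixels -> app units, mirroring the intent of Au::from_f32_px.
fn au_from_f32_px(px: f32) -> i32 {
    (px * AU_PER_PX).round() as i32
}

// app units -> f32 pixels, mirroring the intent of Au::to_f32_px.
fn f32_px_from_au(au: i32) -> f32 {
    au as f32 / AU_PER_PX
}

fn main() {
    let au = au_from_f32_px(1.5);
    assert_eq!(au, 90); // 1.5 px * 60 = 90 app units
    assert!((f32_px_from_au(au) - 1.5).abs() < f32::EPSILON);
}
```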
inotify.rs
//! Monitoring API for filesystem events. //! //! Inotify is a Linux-only API to monitor filesystem events. //! //! For more documentation, please read [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html). //! //! # Examples //! //! Monitor all events happening in directory "test": //! ```no_run //! # use nix::sys::inotify::{AddWatchFlags,InitFlags,Inotify}; //! # //! // We create a new inotify instance. //! let instance = Inotify::init(InitFlags::empty()).unwrap(); //! //! // We add a new watch on directory "test" for all events. //! let wd = instance.add_watch("test", AddWatchFlags::IN_ALL_EVENTS).unwrap(); //! //! loop { //! // We read from our inotify instance for events. //! let events = instance.read_events().unwrap(); //! println!("Events: {:?}", events); //! } //! ``` use libc::{ c_char, c_int, }; use std::ffi::{OsString,OsStr,CStr}; use std::os::unix::ffi::OsStrExt; use std::mem::{MaybeUninit, size_of}; use std::os::unix::io::{RawFd,AsRawFd,FromRawFd}; use std::ptr; use crate::unistd::read; use crate::Result; use crate::NixPath; use crate::errno::Errno; use cfg_if::cfg_if; libc_bitflags! { /// Configuration options for [`inotify_add_watch`](fn.inotify_add_watch.html). pub struct AddWatchFlags: u32 { /// File was accessed. IN_ACCESS; /// File was modified. IN_MODIFY; /// Metadata changed. IN_ATTRIB; /// Writable file was closed. IN_CLOSE_WRITE; /// Nonwritable file was closed. IN_CLOSE_NOWRITE; /// File was opened. IN_OPEN; /// File was moved from X. IN_MOVED_FROM; /// File was moved to Y. IN_MOVED_TO; /// Subfile was created. IN_CREATE; /// Subfile was deleted. IN_DELETE; /// Self was deleted. IN_DELETE_SELF; /// Self was moved. IN_MOVE_SELF; /// Backing filesystem was unmounted. IN_UNMOUNT; /// Event queue overflowed. IN_Q_OVERFLOW; /// File was ignored. IN_IGNORED; /// Combination of `IN_CLOSE_WRITE` and `IN_CLOSE_NOWRITE`. IN_CLOSE; /// Combination of `IN_MOVED_FROM` and `IN_MOVED_TO`. IN_MOVE; /// Only watch the path if it is a directory. IN_ONLYDIR; /// Don't follow symlinks. IN_DONT_FOLLOW; /// Event occurred against directory. IN_ISDIR; /// Only send event once. IN_ONESHOT; /// All of the events. IN_ALL_EVENTS; } } libc_bitflags! { /// Configuration options for [`inotify_init1`](fn.inotify_init1.html). pub struct InitFlags: c_int { /// Set the `FD_CLOEXEC` flag on the file descriptor. IN_CLOEXEC; /// Set the `O_NONBLOCK` flag on the open file description referred to by the new file descriptor. IN_NONBLOCK; } } /// An inotify instance. This is also a file descriptor, you can feed it to /// other interfaces consuming file descriptors, epoll for example. #[derive(Debug, Clone, Copy)] pub struct Inotify { fd: RawFd } /// This object is returned when you create a new watch on an inotify instance. /// It is then returned as part of an event once triggered. It allows you to /// know which watch triggered which event. #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Ord, PartialOrd)] pub struct WatchDescriptor { wd: i32 } /// A single inotify event. /// /// For more documentation see, [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html). #[derive(Debug)] pub struct InotifyEvent { /// Watch descriptor. This field corresponds to the watch descriptor you /// were issued when calling add_watch. It allows you to know which watch /// this event comes from. pub wd: WatchDescriptor, /// Event mask. This field is a bitfield describing the exact event that /// occurred.
pub mask: AddWatchFlags, /// This cookie is a number that allows you to connect related events. For /// now only IN_MOVED_FROM and IN_MOVED_TO can be connected. pub cookie: u32, /// Filename. This field exists only if the event was triggered for a file /// inside the watched directory. pub name: Option<OsString> } impl Inotify { /// Initialize a new inotify instance. /// /// Returns a Result containing an inotify instance. /// /// For more information see, [inotify_init(2)](https://man7.org/linux/man-pages/man2/inotify_init.2.html). pub fn init(flags: InitFlags) -> Result<Inotify> { let res = Errno::result(unsafe { libc::inotify_init1(flags.bits()) }); res.map(|fd| Inotify { fd }) } /// Adds a new watch on the target file or directory. /// /// Returns a watch descriptor. This is not a File Descriptor! /// /// For more information see, [inotify_add_watch(2)](https://man7.org/linux/man-pages/man2/inotify_add_watch.2.html). pub fn add_watch<P: ?Sized + NixPath>(self, path: &P, mask: AddWatchFlags) -> Result<WatchDescriptor> { let res = path.with_nix_path(|cstr| { unsafe { libc::inotify_add_watch(self.fd, cstr.as_ptr(), mask.bits()) } })?; Errno::result(res).map(|wd| WatchDescriptor { wd }) } /// Removes an existing watch using the watch descriptor returned by /// inotify_add_watch. /// /// Returns an EINVAL error if the watch descriptor is invalid. /// /// For more information see, [inotify_rm_watch(2)](https://man7.org/linux/man-pages/man2/inotify_rm_watch.2.html). pub fn rm_watch(self, wd: WatchDescriptor) -> Result<()> { cfg_if! { if #[cfg(target_os = "linux")] { let arg = wd.wd; } else if #[cfg(target_os = "android")] { let arg = wd.wd as u32; } } let res = unsafe { libc::inotify_rm_watch(self.fd, arg) }; Errno::result(res).map(drop) } /// Reads a collection of events from the inotify file descriptor. This call /// can either be blocking or non-blocking depending on whether IN_NONBLOCK /// was set at initialization. /// /// Returns as many events as available. If the call was non-blocking and no /// events could be read then the EAGAIN error is returned. pub fn read_events(self) -> Result<Vec<InotifyEvent>> { let header_size = size_of::<libc::inotify_event>(); const BUFSIZ: usize = 4096; let mut buffer = [0u8; BUFSIZ]; let mut events = Vec::new(); let mut offset = 0; let nread = read(self.fd, &mut buffer)?; while (nread - offset) >= header_size { let event = unsafe { let mut event = MaybeUninit::<libc::inotify_event>::uninit(); ptr::copy_nonoverlapping( buffer.as_ptr().add(offset), event.as_mut_ptr() as *mut u8, (BUFSIZ - offset).min(header_size) ); event.assume_init() }; let name = match event.len { 0 => None, _ => { let ptr = unsafe { buffer .as_ptr() .add(offset + header_size) as *const c_char }; let cstr = unsafe { CStr::from_ptr(ptr) }; Some(OsStr::from_bytes(cstr.to_bytes()).to_owned()) } }; events.push(InotifyEvent { wd: WatchDescriptor { wd: event.wd }, mask: AddWatchFlags::from_bits_truncate(event.mask), cookie: event.cookie, name }); offset += header_size + event.len as usize; } Ok(events) } } impl AsRawFd for Inotify { fn as_raw_fd(&self) -> RawFd { self.fd } } impl FromRawFd for Inotify { unsafe fn from_raw_fd(fd: RawFd) -> Self
}
{ Inotify { fd } }
identifier_body
inotify.rs
//! Monitoring API for filesystem events. //! //! Inotify is a Linux-only API to monitor filesystem events. //! //! For more documentation, please read [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html). //! //! # Examples //! //! Monitor all events happening in directory "test": //! ```no_run //! # use nix::sys::inotify::{AddWatchFlags,InitFlags,Inotify}; //! # //! // We create a new inotify instance. //! let instance = Inotify::init(InitFlags::empty()).unwrap(); //! //! // We add a new watch on directory "test" for all events. //! let wd = instance.add_watch("test", AddWatchFlags::IN_ALL_EVENTS).unwrap(); //! //! loop { //! // We read from our inotify instance for events. //! let events = instance.read_events().unwrap(); //! println!("Events: {:?}", events); //! } //! ``` use libc::{ c_char, c_int, }; use std::ffi::{OsString,OsStr,CStr}; use std::os::unix::ffi::OsStrExt; use std::mem::{MaybeUninit, size_of}; use std::os::unix::io::{RawFd,AsRawFd,FromRawFd}; use std::ptr; use crate::unistd::read; use crate::Result; use crate::NixPath; use crate::errno::Errno; use cfg_if::cfg_if; libc_bitflags! { /// Configuration options for [`inotify_add_watch`](fn.inotify_add_watch.html). pub struct AddWatchFlags: u32 { /// File was accessed. IN_ACCESS; /// File was modified. IN_MODIFY; /// Metadata changed. IN_ATTRIB; /// Writable file was closed. IN_CLOSE_WRITE; /// Nonwritable file was closed. IN_CLOSE_NOWRITE; /// File was opened. IN_OPEN; /// File was moved from X. IN_MOVED_FROM; /// File was moved to Y. IN_MOVED_TO; /// Subfile was created. IN_CREATE; /// Subfile was deleted. IN_DELETE; /// Self was deleted. IN_DELETE_SELF; /// Self was moved. IN_MOVE_SELF; /// Backing filesystem was unmounted. IN_UNMOUNT; /// Event queue overflowed. IN_Q_OVERFLOW; /// File was ignored. IN_IGNORED; /// Combination of `IN_CLOSE_WRITE` and `IN_CLOSE_NOWRITE`. IN_CLOSE; /// Combination of `IN_MOVED_FROM` and `IN_MOVED_TO`. IN_MOVE; /// Only watch the path if it is a directory. IN_ONLYDIR; /// Don't follow symlinks. IN_DONT_FOLLOW; /// Event occurred against directory. IN_ISDIR; /// Only send event once. IN_ONESHOT; /// All of the events. IN_ALL_EVENTS; } } libc_bitflags! { /// Configuration options for [`inotify_init1`](fn.inotify_init1.html). pub struct InitFlags: c_int { /// Set the `FD_CLOEXEC` flag on the file descriptor. IN_CLOEXEC; /// Set the `O_NONBLOCK` flag on the open file description referred to by the new file descriptor. IN_NONBLOCK; } } /// An inotify instance. This is also a file descriptor, you can feed it to /// other interfaces consuming file descriptors, epoll for example. #[derive(Debug, Clone, Copy)] pub struct Inotify { fd: RawFd } /// This object is returned when you create a new watch on an inotify instance. /// It is then returned as part of an event once triggered. It allows you to /// know which watch triggered which event. #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Ord, PartialOrd)] pub struct WatchDescriptor { wd: i32 } /// A single inotify event. /// /// For more documentation see, [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html). #[derive(Debug)] pub struct InotifyEvent { /// Watch descriptor. This field corresponds to the watch descriptor you /// were issued when calling add_watch. It allows you to know which watch /// this event comes from. pub wd: WatchDescriptor, /// Event mask. This field is a bitfield describing the exact event that /// occurred.
pub mask: AddWatchFlags, /// This cookie is a number that allows you to connect related events. For /// now only IN_MOVED_FROM and IN_MOVED_TO can be connected. pub cookie: u32, /// Filename. This field exists only if the event was triggered for a file /// inside the watched directory. pub name: Option<OsString> } impl Inotify { /// Initialize a new inotify instance. /// /// Returns a Result containing an inotify instance. /// /// For more information see, [inotify_init(2)](https://man7.org/linux/man-pages/man2/inotify_init.2.html). pub fn init(flags: InitFlags) -> Result<Inotify> { let res = Errno::result(unsafe { libc::inotify_init1(flags.bits()) }); res.map(|fd| Inotify { fd }) } /// Adds a new watch on the target file or directory. /// /// Returns a watch descriptor. This is not a File Descriptor! /// /// For more information see, [inotify_add_watch(2)](https://man7.org/linux/man-pages/man2/inotify_add_watch.2.html). pub fn add_watch<P: ?Sized + NixPath>(self, path: &P, mask: AddWatchFlags) -> Result<WatchDescriptor> { let res = path.with_nix_path(|cstr| { unsafe { libc::inotify_add_watch(self.fd, cstr.as_ptr(), mask.bits()) } })?; Errno::result(res).map(|wd| WatchDescriptor { wd }) } /// Removes an existing watch using the watch descriptor returned by /// inotify_add_watch. /// /// Returns an EINVAL error if the watch descriptor is invalid. /// /// For more information see, [inotify_rm_watch(2)](https://man7.org/linux/man-pages/man2/inotify_rm_watch.2.html). pub fn rm_watch(self, wd: WatchDescriptor) -> Result<()> { cfg_if! { if #[cfg(target_os = "linux")] { let arg = wd.wd; } else if #[cfg(target_os = "android")] { let arg = wd.wd as u32; } } let res = unsafe { libc::inotify_rm_watch(self.fd, arg) }; Errno::result(res).map(drop) } /// Reads a collection of events from the inotify file descriptor. This call /// can either be blocking or non-blocking depending on whether IN_NONBLOCK /// was set at initialization. /// /// Returns as many events as available. If the call was non-blocking and no /// events could be read then the EAGAIN error is returned. pub fn read_events(self) -> Result<Vec<InotifyEvent>> { let header_size = size_of::<libc::inotify_event>(); const BUFSIZ: usize = 4096; let mut buffer = [0u8; BUFSIZ]; let mut events = Vec::new(); let mut offset = 0; let nread = read(self.fd, &mut buffer)?; while (nread - offset) >= header_size { let event = unsafe { let mut event = MaybeUninit::<libc::inotify_event>::uninit(); ptr::copy_nonoverlapping( buffer.as_ptr().add(offset), event.as_mut_ptr() as *mut u8, (BUFSIZ - offset).min(header_size) ); event.assume_init() }; let name = match event.len { 0 => None, _ =>
}; events.push(InotifyEvent { wd: WatchDescriptor { wd: event.wd }, mask: AddWatchFlags::from_bits_truncate(event.mask), cookie: event.cookie, name }); offset += header_size + event.len as usize; } Ok(events) } } impl AsRawFd for Inotify { fn as_raw_fd(&self) -> RawFd { self.fd } } impl FromRawFd for Inotify { unsafe fn from_raw_fd(fd: RawFd) -> Self { Inotify { fd } } }
{ let ptr = unsafe { buffer .as_ptr() .add(offset + header_size) as *const c_char }; let cstr = unsafe { CStr::from_ptr(ptr) }; Some(OsStr::from_bytes(cstr.to_bytes()).to_owned()) }
conditional_block
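The middle filled in by this record is the branch of `read_events` that recovers the optional trailing filename from a raw `inotify_event`. A self-contained sketch of the same idea, assuming modern Rust (the helper name and `CStr::from_bytes_until_nul`, stabilized in Rust 1.69, are my additions, not the crate's code):

```rust
use std::ffi::{CStr, OsStr, OsString};
use std::os::unix::ffi::OsStrExt;

// Hypothetical helper mirroring the `middle` span above: given the bytes that
// follow an inotify_event header, recover the optional, NUL-padded file name.
fn event_name(tail: &[u8]) -> Option<OsString> {
    if tail.is_empty() {
        return None; // event.len == 0: no name field present
    }
    // The kernel NUL-terminates (and pads) the name, so CStr can find its end.
    let cstr = CStr::from_bytes_until_nul(tail).ok()?;
    Some(OsStr::from_bytes(cstr.to_bytes()).to_owned())
}

fn main() {
    let tail = b"hello.txt\0\0\0";
    assert_eq!(event_name(tail), Some(OsString::from("hello.txt")));
    assert_eq!(event_name(b""), None);
}
```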
inotify.rs
//! Monitoring API for filesystem events.
//!
//! Inotify is a Linux-only API to monitor filesystem events.
//!
//! For more documentation, please read [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html).
//!
//! # Examples
//!
//! Monitor all events happening in directory "test":
//! ```no_run
//! # use nix::sys::inotify::{AddWatchFlags,InitFlags,Inotify};
//! #
//! // We create a new inotify instance.
//! let instance = Inotify::init(InitFlags::empty()).unwrap();
//!
//! // We add a new watch on directory "test" for all events.
//! let wd = instance.add_watch("test", AddWatchFlags::IN_ALL_EVENTS).unwrap();
//!
//! loop {
//!     // We read from our inotify instance for events.
//!     let events = instance.read_events().unwrap();
//!     println!("Events: {:?}", events);
//! }
//! ```

use libc::{
    c_char,
    c_int,
};
use std::ffi::{OsString,OsStr,CStr};
use std::os::unix::ffi::OsStrExt;
use std::mem::{MaybeUninit, size_of};
use std::os::unix::io::{RawFd,AsRawFd,FromRawFd};
use std::ptr;
use crate::unistd::read;
use crate::Result;
use crate::NixPath;
use crate::errno::Errno;
use cfg_if::cfg_if;

libc_bitflags! {
    /// Configuration options for [`inotify_add_watch`](fn.inotify_add_watch.html).
    pub struct AddWatchFlags: u32 {
        /// File was accessed.
        IN_ACCESS;
        /// File was modified.
        IN_MODIFY;
        /// Metadata changed.
        IN_ATTRIB;
        /// Writable file was closed.
        IN_CLOSE_WRITE;
        /// Nonwritable file was closed.
        IN_CLOSE_NOWRITE;
        /// File was opened.
        IN_OPEN;
        /// File was moved from X.
        IN_MOVED_FROM;
        /// File was moved to Y.
        IN_MOVED_TO;
        /// Subfile was created.
        IN_CREATE;
        /// Subfile was deleted.
        IN_DELETE;
        /// Self was deleted.
        IN_DELETE_SELF;
        /// Self was moved.
        IN_MOVE_SELF;
        /// Backing filesystem was unmounted.
        IN_UNMOUNT;
        /// Event queue overflowed.
        IN_Q_OVERFLOW;
        /// File was ignored.
        IN_IGNORED;
        /// Combination of `IN_CLOSE_WRITE` and `IN_CLOSE_NOWRITE`.
        IN_CLOSE;
        /// Combination of `IN_MOVED_FROM` and `IN_MOVED_TO`.
        IN_MOVE;
        /// Only watch the path if it is a directory.
        IN_ONLYDIR;
        /// Don't follow symlinks.
        IN_DONT_FOLLOW;
        /// Event occurred against directory.
        IN_ISDIR;
        /// Only send event once.
        IN_ONESHOT;
        /// All of the events.
        IN_ALL_EVENTS;
    }
}

libc_bitflags! {
    /// Configuration options for [`inotify_init1`](fn.inotify_init1.html).
    pub struct InitFlags: c_int {
        /// Set the `FD_CLOEXEC` flag on the file descriptor.
        IN_CLOEXEC;
        /// Set the `O_NONBLOCK` flag on the open file description referred to by the new file descriptor.
        IN_NONBLOCK;
    }
}

/// An inotify instance. This is also a file descriptor, you can feed it to
/// other interfaces consuming file descriptors, epoll for example.
#[derive(Debug, Clone, Copy)]
pub struct Inotify {
    fd: RawFd
}

/// This object is returned when you create a new watch on an inotify instance.
/// It is then returned as part of an event once triggered. It allows you to
/// know which watch triggered which event.
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct WatchDescriptor {
    wd: i32
}

/// A single inotify event.
///
/// For more documentation see, [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html).
#[derive(Debug)]
pub struct
{
    /// Watch descriptor. This field corresponds to the watch descriptor you
    /// were issued when calling add_watch. It allows you to know which watch
    /// this event comes from.
    pub wd: WatchDescriptor,
    /// Event mask. This field is a bitfield describing the exact event that
    /// occurred.
    pub mask: AddWatchFlags,
    /// This cookie is a number that allows you to connect related events. For
    /// now only IN_MOVED_FROM and IN_MOVED_TO can be connected.
    pub cookie: u32,
    /// Filename. This field exists only if the event was triggered for a file
    /// inside the watched directory.
    pub name: Option<OsString>
}

impl Inotify {
    /// Initialize a new inotify instance.
    ///
    /// Returns a Result containing an inotify instance.
    ///
    /// For more information see, [inotify_init(2)](https://man7.org/linux/man-pages/man2/inotify_init.2.html).
    pub fn init(flags: InitFlags) -> Result<Inotify> {
        let res = Errno::result(unsafe {
            libc::inotify_init1(flags.bits())
        });

        res.map(|fd| Inotify { fd })
    }

    /// Adds a new watch on the target file or directory.
    ///
    /// Returns a watch descriptor. This is not a File Descriptor!
    ///
    /// For more information see, [inotify_add_watch(2)](https://man7.org/linux/man-pages/man2/inotify_add_watch.2.html).
    pub fn add_watch<P:?Sized + NixPath>(self, path: &P, mask: AddWatchFlags) -> Result<WatchDescriptor> {
        let res = path.with_nix_path(|cstr| {
            unsafe {
                libc::inotify_add_watch(self.fd, cstr.as_ptr(), mask.bits())
            }
        })?;

        Errno::result(res).map(|wd| WatchDescriptor { wd })
    }

    /// Removes an existing watch using the watch descriptor returned by
    /// inotify_add_watch.
    ///
    /// Returns an EINVAL error if the watch descriptor is invalid.
    ///
    /// For more information see, [inotify_rm_watch(2)](https://man7.org/linux/man-pages/man2/inotify_rm_watch.2.html).
    pub fn rm_watch(self, wd: WatchDescriptor) -> Result<()> {
        cfg_if! {
            if #[cfg(target_os = "linux")] {
                let arg = wd.wd;
            } else if #[cfg(target_os = "android")] {
                let arg = wd.wd as u32;
            }
        }

        let res = unsafe { libc::inotify_rm_watch(self.fd, arg) };

        Errno::result(res).map(drop)
    }

    /// Reads a collection of events from the inotify file descriptor. This call
    /// can either be blocking or non blocking depending on whether IN_NONBLOCK
    /// was set at initialization.
    ///
    /// Returns as many events as available. If the call was non blocking and no
    /// events could be read then the EAGAIN error is returned.
    pub fn read_events(self) -> Result<Vec<InotifyEvent>> {
        let header_size = size_of::<libc::inotify_event>();
        const BUFSIZ: usize = 4096;
        let mut buffer = [0u8; BUFSIZ];
        let mut events = Vec::new();
        let mut offset = 0;

        let nread = read(self.fd, &mut buffer)?;

        while (nread - offset) >= header_size {
            let event = unsafe {
                let mut event = MaybeUninit::<libc::inotify_event>::uninit();
                ptr::copy_nonoverlapping(
                    buffer.as_ptr().add(offset),
                    event.as_mut_ptr() as *mut u8,
                    (BUFSIZ - offset).min(header_size)
                );
                event.assume_init()
            };

            let name = match event.len {
                0 => None,
                _ => {
                    let ptr = unsafe {
                        buffer
                            .as_ptr()
                            .add(offset + header_size)
                            as *const c_char
                    };
                    let cstr = unsafe { CStr::from_ptr(ptr) };

                    Some(OsStr::from_bytes(cstr.to_bytes()).to_owned())
                }
            };

            events.push(InotifyEvent {
                wd: WatchDescriptor { wd: event.wd },
                mask: AddWatchFlags::from_bits_truncate(event.mask),
                cookie: event.cookie,
                name
            });

            offset += header_size + event.len as usize;
        }

        Ok(events)
    }
}

impl AsRawFd for Inotify {
    fn as_raw_fd(&self) -> RawFd {
        self.fd
    }
}

impl FromRawFd for Inotify {
    unsafe fn from_raw_fd(fd: RawFd) -> Self {
        Inotify { fd }
    }
}
InotifyEvent
identifier_name
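This record's hole is just the struct name `InotifyEvent`; the struct is what `read_events` yields. A short usage sketch against the API shown in these records, assuming a `nix` version matching them (the watched path and flag choice are illustrative):

```rust
use nix::sys::inotify::{AddWatchFlags, InitFlags, Inotify};

fn main() -> nix::Result<()> {
    let instance = Inotify::init(InitFlags::empty())?;
    // Watch only for creations and deletions instead of IN_ALL_EVENTS.
    instance.add_watch("test", AddWatchFlags::IN_CREATE | AddWatchFlags::IN_DELETE)?;
    loop {
        for event in instance.read_events()? {
            // `mask` is a bitfield; `name` is only present for events on
            // files inside the watched directory.
            if event.mask.contains(AddWatchFlags::IN_CREATE) {
                println!("created: {:?}", event.name);
            }
        }
    }
}
```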
inotify.rs
//! Monitoring API for filesystem events.
//!
//! Inotify is a Linux-only API to monitor filesystem events.
//!
//! For more documentation, please read [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html).
//!
//! # Examples
//!
//! Monitor all events happening in directory "test":
//! ```no_run
//! # use nix::sys::inotify::{AddWatchFlags,InitFlags,Inotify};
//! #
//! // We create a new inotify instance.
//! let instance = Inotify::init(InitFlags::empty()).unwrap();
//!
//! // We add a new watch on directory "test" for all events.
//! let wd = instance.add_watch("test", AddWatchFlags::IN_ALL_EVENTS).unwrap();
//!
//! loop {
//!     // We read from our inotify instance for events.
//!     let events = instance.read_events().unwrap();
//!     println!("Events: {:?}", events);
//! }
//! ```

use libc::{
    c_char,
    c_int,
};
use std::ffi::{OsString,OsStr,CStr};
use std::os::unix::ffi::OsStrExt;
use std::mem::{MaybeUninit, size_of};
use std::os::unix::io::{RawFd,AsRawFd,FromRawFd};
use std::ptr;
use crate::unistd::read;
use crate::Result;
use crate::NixPath;
use crate::errno::Errno;
use cfg_if::cfg_if;

libc_bitflags! {
    /// Configuration options for [`inotify_add_watch`](fn.inotify_add_watch.html).
    pub struct AddWatchFlags: u32 {
        /// File was accessed.
        IN_ACCESS;
        /// File was modified.
        IN_MODIFY;
        /// Metadata changed.
        IN_ATTRIB;
        /// Writable file was closed.
        IN_CLOSE_WRITE;
        /// Nonwritable file was closed.
        IN_CLOSE_NOWRITE;
        /// File was opened.
        IN_OPEN;
        /// File was moved from X.
        IN_MOVED_FROM;
        /// File was moved to Y.
        IN_MOVED_TO;
        /// Subfile was created.
        IN_CREATE;
        /// Subfile was deleted.
        IN_DELETE;
        /// Self was deleted.
        IN_DELETE_SELF;
        /// Self was moved.
        IN_MOVE_SELF;
        /// Backing filesystem was unmounted.
        IN_UNMOUNT;
        /// Event queue overflowed.
        IN_Q_OVERFLOW;
        /// File was ignored.
        IN_IGNORED;
        /// Combination of `IN_CLOSE_WRITE` and `IN_CLOSE_NOWRITE`.
        IN_CLOSE;
        /// Combination of `IN_MOVED_FROM` and `IN_MOVED_TO`.
        IN_MOVE;
        /// Only watch the path if it is a directory.
        IN_ONLYDIR;
        /// Don't follow symlinks.
        IN_DONT_FOLLOW;
        /// Event occurred against directory.
        IN_ISDIR;
        /// Only send event once.
        IN_ONESHOT;
        /// All of the events.
        IN_ALL_EVENTS;
    }
}

libc_bitflags! {
    /// Configuration options for [`inotify_init1`](fn.inotify_init1.html).
    pub struct InitFlags: c_int {
        /// Set the `FD_CLOEXEC` flag on the file descriptor.
        IN_CLOEXEC;
        /// Set the `O_NONBLOCK` flag on the open file description referred to by the new file descriptor.
        IN_NONBLOCK;
    }
}

/// An inotify instance. This is also a file descriptor, you can feed it to
/// other interfaces consuming file descriptors, epoll for example.
#[derive(Debug, Clone, Copy)]
pub struct Inotify {
    fd: RawFd
}

/// This object is returned when you create a new watch on an inotify instance.
/// It is then returned as part of an event once triggered. It allows you to
/// know which watch triggered which event.
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct WatchDescriptor {
    wd: i32
}

/// A single inotify event.
///
/// For more documentation see, [inotify(7)](https://man7.org/linux/man-pages/man7/inotify.7.html).
#[derive(Debug)]
pub struct InotifyEvent {
    /// Watch descriptor. This field corresponds to the watch descriptor you
pub mask: AddWatchFlags, /// This cookie is a number that allows you to connect related events. For /// now only IN_MOVED_FROM and IN_MOVED_TO can be connected. pub cookie: u32, /// Filename. This field exists only if the event was triggered for a file /// inside the watched directory. pub name: Option<OsString> } impl Inotify { /// Initialize a new inotify instance. /// /// Returns a Result containing an inotify instance. /// /// For more information see, [inotify_init(2)](https://man7.org/linux/man-pages/man2/inotify_init.2.html). pub fn init(flags: InitFlags) -> Result<Inotify> { let res = Errno::result(unsafe { libc::inotify_init1(flags.bits()) }); res.map(|fd| Inotify { fd }) } /// Adds a new watch on the target file or directory. /// /// Returns a watch descriptor. This is not a File Descriptor! /// /// For more information see, [inotify_add_watch(2)](https://man7.org/linux/man-pages/man2/inotify_add_watch.2.html). pub fn add_watch<P:?Sized + NixPath>(self, path: &P, mask: AddWatchFlags) -> Result<WatchDescriptor> { let res = path.with_nix_path(|cstr| { unsafe { libc::inotify_add_watch(self.fd, cstr.as_ptr(), mask.bits()) } })?; Errno::result(res).map(|wd| WatchDescriptor { wd }) } /// Removes an existing watch using the watch descriptor returned by /// inotify_add_watch. /// /// Returns an EINVAL error if the watch descriptor is invalid. /// /// For more information see, [inotify_rm_watch(2)](https://man7.org/linux/man-pages/man2/inotify_rm_watch.2.html). pub fn rm_watch(self, wd: WatchDescriptor) -> Result<()> { cfg_if! { if #[cfg(target_os = "linux")] { let arg = wd.wd; } else if #[cfg(target_os = "android")] { let arg = wd.wd as u32; } } let res = unsafe { libc::inotify_rm_watch(self.fd, arg) }; Errno::result(res).map(drop) } /// Reads a collection of events from the inotify file descriptor. This call /// can either be blocking or non blocking depending on whether IN_NONBLOCK /// was set at initialization. /// /// Returns as many events as available. If the call was non blocking and no /// events could be read then the EAGAIN error is returned. pub fn read_events(self) -> Result<Vec<InotifyEvent>> { let header_size = size_of::<libc::inotify_event>(); const BUFSIZ: usize = 4096; let mut buffer = [0u8; BUFSIZ]; let mut events = Vec::new(); let mut offset = 0; let nread = read(self.fd, &mut buffer)?; while (nread - offset) >= header_size { let event = unsafe { let mut event = MaybeUninit::<libc::inotify_event>::uninit(); ptr::copy_nonoverlapping( buffer.as_ptr().add(offset), event.as_mut_ptr() as *mut u8, (BUFSIZ - offset).min(header_size) ); event.assume_init() }; let name = match event.len { 0 => None, _ => { let ptr = unsafe { buffer .as_ptr() .add(offset + header_size) as *const c_char }; let cstr = unsafe { CStr::from_ptr(ptr) }; Some(OsStr::from_bytes(cstr.to_bytes()).to_owned()) } }; events.push(InotifyEvent { wd: WatchDescriptor { wd: event.wd }, mask: AddWatchFlags::from_bits_truncate(event.mask), cookie: event.cookie, name }); offset += header_size + event.len as usize; } Ok(events) } } impl AsRawFd for Inotify { fn as_raw_fd(&self) -> RawFd { self.fd } } impl FromRawFd for Inotify { unsafe fn from_raw_fd(fd: RawFd) -> Self { Inotify { fd } } }
/// were issued when calling add_watch. It allows you to know which watch
    /// this event comes from.
    pub wd: WatchDescriptor,
    /// Event mask. This field is a bitfield describing the exact event that
    /// occurred.
random_line_split
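The `read_events` documentation in these records notes that a non-blocking instance returns EAGAIN when nothing is queued. A hedged polling sketch; it assumes a later `nix` where the public error type is `Errno`, which is not true of the exact vintage captured here:

```rust
use nix::errno::Errno;
use nix::sys::inotify::{AddWatchFlags, InitFlags, Inotify};

fn main() -> nix::Result<()> {
    // IN_NONBLOCK puts the underlying descriptor in non-blocking mode.
    let instance = Inotify::init(InitFlags::IN_NONBLOCK)?;
    instance.add_watch("test", AddWatchFlags::IN_ALL_EVENTS)?;
    match instance.read_events() {
        Ok(events) => println!("{} event(s) pending", events.len()),
        // Nothing queued yet: not a failure when polling.
        Err(Errno::EAGAIN) => println!("no events pending"),
        Err(e) => return Err(e),
    }
    Ok(())
}
```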
error.rs
//! Postgres errors use std::collections::HashMap; use std::io; use std::fmt; use openssl::ssl::error; use phf::PhfMap; use PostgresResult; use types::PostgresType; macro_rules! make_errors( ($($code:expr => $error:ident),+) => ( /// SQLSTATE error codes #[deriving(PartialEq, Eq, Clone, Show)] #[allow(missing_doc)] pub enum PostgresSqlState { $($error,)+ UnknownSqlState(String) } static STATE_MAP: PhfMap<&'static str, PostgresSqlState> = phf_map!( $($code => $error),+ ); impl PostgresSqlState { #[doc(hidden)] pub fn from_code(s: &str) -> PostgresSqlState { match STATE_MAP.find_equiv(&s) { Some(state) => state.clone(), None => UnknownSqlState(s.into_string()) } } } ) ) // From http://www.postgresql.org/docs/9.2/static/errcodes-appendix.html make_errors!( // Class 00 — Successful Completion "00000" => SuccessfulCompletion, // Class 01 — Warning "01000" => Warning, "0100C" => DynamicResultSetsReturned, "01008" => ImplicitZeroBitPadding, "01003" => NullValueEliminatedInSetFunction, "01007" => PrivilegeNotGranted, "01006" => PrivilegeNotRevoked, "01004" => StringDataRightTruncationWarning, "01P01" => DeprecatedFeature, // Class 02 — No Data "02000" => NoData, "02001" => NoAdditionalDynamicResultSetsReturned, // Class 03 — SQL Statement Not Yet Complete "03000" => SqlStatementNotYetComplete, // Class 08 — Connection Exception "08000" => ConnectionException, "08003" => ConnectionDoesNotExist, "08006" => ConnectionFailure, "08001" => SqlclientUnableToEstablishSqlconnection, "08004" => SqlserverRejectedEstablishmentOfSqlconnection, "08007" => TransactionResolutionUnknown, "08P01" => ProtocolViolation, // Class 09 — Triggered Action Exception "09000" => TriggeredActionException, // Class 0A — Feature Not Supported "0A000" => FeatureNotSupported, // Class 0B — Invalid Transaction Initiation "0B000" => InvalidTransactionInitiation, // Class 0F — Locator Exception "0F000" => LocatorException, "0F001" => InvalidLocatorException, // Class 0L — Invalid Grantor "0L000" => InvalidGrantor, "0LP01" => InvalidGrantOperation, // Class 0P — Invalid Role Specification "0P000" => InvalidRoleSpecification, // Class 0Z — Diagnostics Exception "0Z000" => DiagnosticsException, "0Z002" => StackedDiagnosticsAccessedWithoutActiveHandler, // Class 20 — Case Not Found "20000" => CaseNotFound, // Class 21 — Cardinality Violation "21000" => CardinalityViolation, // Class 22 — Data Exception "22000" => DataException, "2202E" => ArraySubscriptError, "22021" => CharacterNotInRepertoire, "22008" => DatetimeFieldOverflow, "22012" => DivisionByZero, "22005" => ErrorInAssignment, "2200B" => EscapeCharacterConflict, "22022" => IndicatorOverflow, "22015" => IntervalFieldOverflow, "2201E" => InvalidArgumentForLogarithm, "22014" => InvalidArgumentForNtileFunction, "22016" => InvalidArgumentForNthValueFunction, "2201F" => InvalidArgumentForPowerFunction, "2201G" => InvalidArgumentForWidthBucketFunction, "22018" => InvalidCharacterValueForCast, "22007" => InvalidDatetimeFormat, "22019" => InvalidEscapeCharacter, "2200D" => InvalidEscapeOctet, "22025" => InvalidEscapeSequence, "22P06" => NonstandardUseOfEscapeCharacter, "22010" => InvalidIndicatorParameterValue, "22023" => InvalidParameterValue, "2201B" => InvalidRegularExpression, "2201W" => InvalidRowCountInLimitClause, "2201X" => InvalidRowCountInResultOffsetClause, "22009" => InvalidTimeZoneDisplacementValue, "2200C" => InvalidUseOfEscapeCharacter, "2200G" => MostSpecificTypeMismatch, "22004" => NullValueNotAllowedData, "22002" => NullValueNoIndicatorParameter, "22003" => 
NumericValueOutOfRange, "22026" => StringDataLengthMismatch, "22001" => StringDataRightTruncationException, "22011" => SubstringError, "22027" => TrimError, "22024" => UnterminatedCString, "2200F" => ZeroLengthCharacterString, "22P01" => FloatingPointException, "22P02" => InvalidTextRepresentation, "22P03" => InvalidBinaryRepresentation, "22P04" => BadCopyFileFormat, "22P05" => UntranslatableCharacter, "2200L" => NotAnXmlDocument, "2200M" => InvalidXmlDocument, "2200N" => InvalidXmlContent, "2200S" => InvalidXmlComment, "2200T" => InvalidXmlProcessingInstruction, // Class 23 — Integrity Constraint Violation "23000" => IntegrityConstraintViolation, "23001" => RestrictViolation, "23502" => NotNullViolation, "23503" => ForeignKeyViolation, "23505" => UniqueViolation, "23514" => CheckViolation, "32P01" => ExclusionViolation, // Class 24 — Invalid Cursor State "24000" => InvalidCursorState, // Class 25 — Invalid Transaction State "25000" => InvalidTransactionState, "25001" => ActiveSqlTransaction, "25002" => BranchTransactionAlreadyActive, "25008" => HeldCursorRequiresSameIsolationLevel, "25003" => InappropriateAccessModeForBranchTransaction, "25004" => InappropriateIsolationLevelForBranchTransaction, "25005" => NoActiveSqlTransactionForBranchTransaction, "25006" => ReadOnlySqlTransaction, "25007" => SchemaAndDataStatementMixingNotSupported, "25P01" => NoActiveSqlTransaction, "25P02" => InFailedSqlTransaction, // Class 26 — Invalid SQL Statement Name "26000" => InvalidSqlStatementName, // Class 27 — Triggered Data Change Violation "27000" => TriggeredDataChangeViolation, // Class 28 — Invalid Authorization Specification "28000" => InvalidAuthorizationSpecification, "28P01" => InvalidPassword, // Class 2B — Dependent Privilege Descriptors Still Exist "2B000" => DependentPrivilegeDescriptorsStillExist, "2BP01" => DependentObjectsStillExist, // Class 2D — Invalid Transaction Termination "2D000" => InvalidTransactionTermination, // Class 2F — SQL Routine Exception "2F000" => SqlRoutineException, "2F005" => FunctionExecutedNoReturnStatement, "2F002" => ModifyingSqlDataNotPermittedSqlRoutine, "2F003" => ProhibitedSqlStatementAttemptedSqlRoutine, "2F004" => ReadingSqlDataNotPermittedSqlRoutine, // Class 34 — Invalid Cursor Name "34000" => InvalidCursorName, // Class 38 — External Routine Exception "38000" => ExternalRoutineException, "38001" => ContainingSqlNotPermitted, "38002" => ModifyingSqlDataNotPermittedExternalRoutine, "38003" => ProhibitedSqlStatementAttemptedExternalRoutine, "38004" => ReadingSqlDataNotPermittedExternalRoutine, // Class 39 — External Routine Invocation Exception "39000" => ExternalRoutineInvocationException, "39001" => InvalidSqlstateReturned, "39004" => NullValueNotAllowedExternalRoutine, "39P01" => TriggerProtocolViolated, "39P02" => SrfProtocolViolated, // Class 3B — Savepoint Exception "3B000" => SavepointException, "3B001" => InvalidSavepointException, // Class 3D — Invalid Catalog Name "3D000" => InvalidCatalogName, // Class 3F — Invalid Schema Name "3F000" => InvalidSchemaName, // Class 40 — Transaction Rollback "40000" => TransactionRollback, "40002" => TransactionIntegrityConstraintViolation, "40001" => SerializationFailure, "40003" => StatementCompletionUnknown, "40P01" => DeadlockDetected, // Class 42 — Syntax Error or Access Rule Violation "42000" => SyntaxErrorOrAccessRuleViolation, "42601" => SyntaxError, "42501" => InsufficientPrivilege, "42846" => CannotCoerce, "42803" => GroupingError, "42P20" => WindowingError, "42P19" => InvalidRecursion, "42830" => 
InvalidForeignKey,
    "42602" => InvalidName,
    "42622" => NameTooLong,
    "42939" => ReservedName,
    "42804" => DatatypeMismatch,
    "42P18" => IndeterminateDatatype,
    "42P21" => CollationMismatch,
    "42P22" => IndeterminateCollation,
    "42809" => WrongObjectType,
    "42703" => UndefinedColumn,
    "42883" => UndefinedFunction,
    "42P01" => UndefinedTable,
    "42P02" => UndefinedParameter,
    "42704" => UndefinedObject,
    "42701" => DuplicateColumn,
    "42P03" => DuplicateCursor,
    "42P04" => DuplicateDatabase,
    "42723" => DuplicateFunction,
    "42P05" => DuplicatePreparedStatement,
    "42P06" => DuplicateSchema,
    "42P07" => DuplicateTable,
    "42712" => DuplicateAlias,
    "42710" => DuplicateObject,
    "42702" => AmbiguousColumn,
    "42725" => AmbiguousFunction,
    "42P08" => AmbiguousParameter,
    "42P09" => AmbiguousAlias,
    "42P10" => InvalidColumnReference,
    "42611" => InvalidColumnDefinition,
    "42P11" => InvalidCursorDefinition,
    "42P12" => InvalidDatabaseDefinition,
    "42P13" => InvalidFunctionDefinition,
    "42P14" => InvalidPreparedStatementDefinition,
    "42P15" => InvalidSchemaDefinition,
    "42P16" => InvalidTableDefinition,
    "42P17" => InvalidObjectDefinition,

    // Class 44 — WITH CHECK OPTION Violation
    "44000" => WithCheckOptionViolation,

    // Class 53 — Insufficient Resources
    "53000" => InsufficientResources,
    "53100" => DiskFull,
    "53200" => OutOfMemory,
    "53300" => TooManyConnections,
    "53400" => ConfigurationLimitExceeded,

    // Class 54 — Program Limit Exceeded
    "54000" => ProgramLimitExceeded,
    "54001" => StatementTooComplex,
    "54011" => TooManyColumns,
    "54023" => TooManyArguments,

    // Class 55 — Object Not In Prerequisite State
    "55000" => ObjectNotInPrerequisiteState,
    "55006" => ObjectInUse,
    "55P02" => CantChangeRuntimeParam,
    "55P03" => LockNotAvailable,

    // Class 57 — Operator Intervention
    "57000" => OperatorIntervention,
    "57014" => QueryCanceled,
    "57P01" => AdminShutdown,
    "57P02" => CrashShutdown,
    "57P03" => CannotConnectNow,
    "57P04" => DatabaseDropped,

    // Class 58 — System Error
    "58000" => SystemError,
    "58030" => IoError,
    "58P01" => UndefinedFile,
    "58P02" => DuplicateFile,

    // Class F0 — Configuration File Error
    "F0000" => ConfigFileError,
    "F0001" => LockFileExists,

    // Class HV — Foreign Data Wrapper Error (SQL/MED)
    "HV000" => FdwError,
    "HV005" => FdwColumnNameNotFound,
    "HV002" => FdwDynamicParameterValueNeeded,
    "HV010" => FdwFunctionSequenceError,
    "HV021" => FdwInconsistentDescriptorInformation,
    "HV024" => FdwInvalidAttributeValue,
    "HV007" => FdwInvalidColumnName,
    "HV008" => FdwInvalidColumnNumber,
    "HV004" => FdwInvalidDataType,
    "HV006" => FdwInvalidDataTypeDescriptors,
    "HV091" => FdwInvalidDescriptorFieldIdentifier,
    "HV00B" => FdwInvalidHandle,
    "HV00C" => FdwInvalidOptionIndex,
    "HV00D" => FdwInvalidOptionName,
    "HV090" => FdwInvalidStringLengthOrBufferLength,
    "HV00A" => FdwInvalidStringFormat,
    "HV009" => FdwInvalidUseOfNullPointer,
    "HV014" => FdwTooManyHandles,
    "HV001" => FdwOutOfMemory,
    "HV00P" => FdwNoSchemas,
    "HV00J" => FdwOptionNameNotFound,
    "HV00K" => FdwReplyHandle,
    "HV00Q" => FdwSchemaNotFound,
    "HV00R" => FdwTableNotFound,
    "HV00L" => FdwUnableToCreateExecution,
    "HV00M" => FdwUnableToCreateReply,
    "HV00N" => FdwUnableToEstablishConnection,

    // Class P0 — PL/pgSQL Error
    "P0000" => PlpgsqlError,
    "P0001" => RaiseException,
    "P0002" => NoDataFound,
    "P0003" => TooManyRows,

    // Class XX — Internal Error
    "XX000" => InternalError,
    "XX001" => DataCorrupted,
    "XX002" => IndexCorrupted
)

/// Reasons a new Postgres connection could fail
#[deriving(Clone, PartialEq, Eq)]
pub enum PostgresConnectError {
    /// The provided URL could not be parsed
    InvalidUrl(String),
    /// The
URL was missing a user MissingUser, /// There was an error opening a socket to the server SocketError(io::IoError), /// An error from the Postgres server itself PgConnectDbError(PostgresDbError), /// A password was required but not provided in the URL MissingPassword, /// The Postgres server requested an authentication method not supported /// by the driver UnsupportedAuthentication, /// The Postgres server does not support SSL encryption NoSslSupport, /// There was an error initializing the SSL session SslError(error::SslError), /// There was an error communicating with the server PgConnectStreamError(io::IoError), /// The server sent an unexpected response PgConnectBadResponse, } impl fmt::Show for PostgresConnectError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match *self { InvalidUrl(ref err) => write!(fmt, "Invalid URL: {}", err), MissingUser => write!(fmt, "User missing in URL"), SocketError(ref err) => write!(fmt, "Unable to open connection to server: {}", err), PgConnectDbError(ref err) => err.fmt(fmt), MissingPassword => write!(fmt, "The server requested a password but none was provided"), UnsupportedAuthentication => write!(fmt, "The server requested an unsupported authentication method"), NoSslSupport => write!(fmt, "The server does not support SSL"), SslError(ref err) => write!(fmt, "Error initiating SSL session: {}", err), PgConnectStreamError(ref err) => write!(fmt, "Error communicating with server: {}", err), PgConnectBadResponse => write!(fmt, "The server returned an unexpected response"), } } } /// Represents the position of an error in a query #[deriving(Clone, PartialEq, Eq)] pub enum PostgresErrorPosition { /// A position in the original query Position(uint), /// A position in an internally generated query InternalPosition { /// The byte position pub position: uint, /// A query generated by the Postgres server pub query: String } } /// Encapsulates a Postgres error or notice. #[deriving(Clone, PartialEq, Eq)] pub struct PostgresDbError { /// The field contents are ERROR, FATAL, or PANIC (in an error
/// or WARNING, NOTICE, DEBUG, INFO, or LOG (in a notice message), or a /// localized translation of one of these. pub severity: String, /// The SQLSTATE code for the error. pub code: PostgresSqlState, /// The primary human-readable error message. This should be accurate but /// terse (typically one line). pub message: String, /// An optional secondary error message carrying more detail about the /// problem. Might run to multiple lines. pub detail: Option<String>, /// An optional suggestion what to do about the problem. This is intended /// to differ from Detail in that it offers advice (potentially /// inappropriate) rather than hard facts. Might run to multiple lines. pub hint: Option<String>, /// An optional error cursor position into either the original query string /// or an internally generated query. pub position: Option<PostgresErrorPosition>, /// An indication of the context in which the error occurred. Presently /// this includes a call stack traceback of active procedural language /// functions and internally-generated queries. The trace is one entry per /// line, most recent first. pub where_: Option<String>, /// If the error was associated with a specific database object, the name /// of the schema containing that object, if any. (PostgreSQL 9.3+) pub schema: Option<String>, /// If the error was associated with a specific table, the name of the /// table. (Refer to the schema name field for the name of the table's /// schema.) (PostgreSQL 9.3+) pub table: Option<String>, /// If the error was associated with a specific table column, the name of /// the column. (Refer to the schema and table name fields to identify the /// table.) (PostgreSQL 9.3+) pub column: Option<String>, /// If the error was associated with a specific data type, the name of the /// data type. (Refer to the schema name field for the name of the data /// type's schema.) (PostgreSQL 9.3+) pub datatype: Option<String>, /// If the error was associated with a specific constraint, the name of the /// constraint. Refer to fields listed above for the associated table or /// domain. (For this purpose, indexes are treated as constraints, even if /// they weren't created with constraint syntax.) (PostgreSQL 9.3+) pub constraint: Option<String>, /// The file name of the source-code location where the error was reported. pub file: String, /// The line number of the source-code location where the error was /// reported. pub line: uint, /// The name of the source-code routine reporting the error. 
pub routine: String } impl PostgresDbError { #[doc(hidden)] pub fn new_raw(fields: Vec<(u8, String)>) -> Result<PostgresDbError, ()> { let mut map: HashMap<_, _> = fields.into_iter().collect(); Ok(PostgresDbError { severity: try!(map.pop(&b'S').ok_or(())), code: PostgresSqlState::from_code(try!(map.pop(&b'C').ok_or(()))[]), message: try!(map.pop(&b'M').ok_or(())), detail: map.pop(&b'D'), hint: map.pop(&b'H'), position: match map.pop(&b'P') { Some(pos) => Some(Position(try!(from_str(pos[]).ok_or(())))), None => match map.pop(&b'p') { Some(pos) => Some(InternalPosition { position: try!(from_str(pos[]).ok_or(())), query: try!(map.pop(&b'q').ok_or(())) }), None => None } }, where_: map.pop(&b'W'), schema: map.pop(&b's'), table: map.pop(&b't'), column: map.pop(&b'c'), datatype: map.pop(&b'd'), constraint: map.pop(&b'n'), file: try!(map.pop(&b'F').ok_or(())), line: try!(map.pop(&b'L').and_then(|l| from_str(l[])).ok_or(())), routine: try!(map.pop(&b'R').ok_or(())), }) } #[doc(hidden)] pub fn new_connect<T>(fields: Vec<(u8, String)>) -> Result<T, PostgresConnectError> { match PostgresDbError::new_raw(fields) { Ok(err) => Err(PgConnectDbError(err)), Err(()) => Err(PgConnectBadResponse), } } #[doc(hidden)] pub fn new<T>(fields: Vec<(u8, String)>) -> PostgresResult<T> { match PostgresDbError::new_raw(fields) { Ok(err) => Err(PgDbError(err)), Err(()) => Err(PgBadData), } } } impl fmt::Show for PostgresDbError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message) } } /// An error encountered when communicating with the Postgres server #[deriving(Clone, PartialEq, Eq)] pub enum PostgresError { /// An error reported by the Postgres server PgDbError(PostgresDbError), /// An error communicating with the Postgres server PgStreamError(io::IoError), /// The communication channel with the Postgres server has desynchronized /// due to an earlier communications error. 
PgStreamDesynchronized, /// A prepared statement was executed on a connection it does not belong to PgWrongConnection, /// An incorrect number of parameters were bound to a statement PgWrongParamCount { /// The expected number of parameters pub expected: uint, /// The actual number of parameters pub actual: uint, }, /// An attempt was made to convert between incompatible Rust and Postgres /// types PgWrongType(PostgresType), /// An attempt was made to read from a column that does not exist PgInvalidColumn, /// A value was NULL but converted to a non-nullable Rust type PgWasNull, /// An attempt was made to prepare a statement or start a transaction on an /// object other than the active transaction PgWrongTransaction, /// The server returned an unexpected response PgBadResponse, /// The server provided data that the client could not parse PgBadData, } impl fmt::Show for PostgresError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match *self { PgDbError(ref err) => err.fmt(fmt), PgStreamError(ref err) => err.fmt(fmt), PgStreamDesynchronized => write!(fmt, "Communication with the server has desynchronized due to an earlier \ IO error"), PgWrongConnection => write!(fmt, "A statement was executed with a connection it was not prepared with"), PgWrongParamCount { expected, actual } => write!(fmt, "Expected {} parameters but got {}", expected, actual), PgWrongType(ref ty) => write!(fmt, "Unexpected type {}", ty), PgInvalidColumn => write!(fmt, "Invalid column"), PgWasNull => write!(fmt, "The value was NULL"), PgWrongTransaction => write!(fmt, "An attempt was made to prepare a statement or start a transaction on \ an object other than the active transaction"), PgBadResponse => write!(fmt, "The server returned an unexpected response"), PgBadData => write!(fmt, "The server provided data that the client could not parse"), } } }
message),
identifier_name
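The `make_errors!` invocation in this record expands to the `PostgresSqlState` enum and a compile-time `PhfMap` consulted by `from_code`. The lookup reduces to a plain match; a self-contained sketch using two codes from the table above (`UniqueViolation` for 23505, and the `UnknownSqlState` fallback):

```rust
// Minimal sketch of the SQLSTATE lookup the macro generates
// (a plain match instead of the record's compile-time PhfMap).
#[derive(Debug, PartialEq)]
enum SqlState {
    UniqueViolation,
    UnknownSqlState(String),
}

fn from_code(s: &str) -> SqlState {
    match s {
        "23505" => SqlState::UniqueViolation,
        _ => SqlState::UnknownSqlState(s.to_string()),
    }
}

fn main() {
    assert_eq!(from_code("23505"), SqlState::UniqueViolation);
    assert_eq!(
        from_code("99999"),
        SqlState::UnknownSqlState("99999".into())
    );
}
```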
error.rs
//! Postgres errors use std::collections::HashMap; use std::io; use std::fmt; use openssl::ssl::error; use phf::PhfMap; use PostgresResult; use types::PostgresType; macro_rules! make_errors( ($($code:expr => $error:ident),+) => ( /// SQLSTATE error codes #[deriving(PartialEq, Eq, Clone, Show)] #[allow(missing_doc)] pub enum PostgresSqlState { $($error,)+ UnknownSqlState(String) } static STATE_MAP: PhfMap<&'static str, PostgresSqlState> = phf_map!( $($code => $error),+ ); impl PostgresSqlState { #[doc(hidden)] pub fn from_code(s: &str) -> PostgresSqlState { match STATE_MAP.find_equiv(&s) { Some(state) => state.clone(), None => UnknownSqlState(s.into_string()) } } } ) ) // From http://www.postgresql.org/docs/9.2/static/errcodes-appendix.html make_errors!( // Class 00 — Successful Completion "00000" => SuccessfulCompletion, // Class 01 — Warning "01000" => Warning, "0100C" => DynamicResultSetsReturned, "01008" => ImplicitZeroBitPadding, "01003" => NullValueEliminatedInSetFunction, "01007" => PrivilegeNotGranted, "01006" => PrivilegeNotRevoked, "01004" => StringDataRightTruncationWarning, "01P01" => DeprecatedFeature, // Class 02 — No Data "02000" => NoData, "02001" => NoAdditionalDynamicResultSetsReturned, // Class 03 — SQL Statement Not Yet Complete "03000" => SqlStatementNotYetComplete, // Class 08 — Connection Exception "08000" => ConnectionException, "08003" => ConnectionDoesNotExist, "08006" => ConnectionFailure, "08001" => SqlclientUnableToEstablishSqlconnection, "08004" => SqlserverRejectedEstablishmentOfSqlconnection, "08007" => TransactionResolutionUnknown, "08P01" => ProtocolViolation, // Class 09 — Triggered Action Exception "09000" => TriggeredActionException, // Class 0A — Feature Not Supported "0A000" => FeatureNotSupported, // Class 0B — Invalid Transaction Initiation "0B000" => InvalidTransactionInitiation, // Class 0F — Locator Exception "0F000" => LocatorException, "0F001" => InvalidLocatorException, // Class 0L — Invalid Grantor "0L000" => InvalidGrantor, "0LP01" => InvalidGrantOperation, // Class 0P — Invalid Role Specification "0P000" => InvalidRoleSpecification, // Class 0Z — Diagnostics Exception "0Z000" => DiagnosticsException, "0Z002" => StackedDiagnosticsAccessedWithoutActiveHandler, // Class 20 — Case Not Found "20000" => CaseNotFound, // Class 21 — Cardinality Violation "21000" => CardinalityViolation, // Class 22 — Data Exception "22000" => DataException, "2202E" => ArraySubscriptError, "22021" => CharacterNotInRepertoire, "22008" => DatetimeFieldOverflow, "22012" => DivisionByZero, "22005" => ErrorInAssignment, "2200B" => EscapeCharacterConflict, "22022" => IndicatorOverflow, "22015" => IntervalFieldOverflow, "2201E" => InvalidArgumentForLogarithm, "22014" => InvalidArgumentForNtileFunction, "22016" => InvalidArgumentForNthValueFunction, "2201F" => InvalidArgumentForPowerFunction, "2201G" => InvalidArgumentForWidthBucketFunction, "22018" => InvalidCharacterValueForCast, "22007" => InvalidDatetimeFormat, "22019" => InvalidEscapeCharacter, "2200D" => InvalidEscapeOctet, "22025" => InvalidEscapeSequence, "22P06" => NonstandardUseOfEscapeCharacter, "22010" => InvalidIndicatorParameterValue, "22023" => InvalidParameterValue, "2201B" => InvalidRegularExpression, "2201W" => InvalidRowCountInLimitClause, "2201X" => InvalidRowCountInResultOffsetClause, "22009" => InvalidTimeZoneDisplacementValue, "2200C" => InvalidUseOfEscapeCharacter, "2200G" => MostSpecificTypeMismatch, "22004" => NullValueNotAllowedData, "22002" => NullValueNoIndicatorParameter, "22003" => 
NumericValueOutOfRange, "22026" => StringDataLengthMismatch, "22001" => StringDataRightTruncationException, "22011" => SubstringError, "22027" => TrimError, "22024" => UnterminatedCString, "2200F" => ZeroLengthCharacterString, "22P01" => FloatingPointException, "22P02" => InvalidTextRepresentation, "22P03" => InvalidBinaryRepresentation, "22P04" => BadCopyFileFormat, "22P05" => UntranslatableCharacter, "2200L" => NotAnXmlDocument, "2200M" => InvalidXmlDocument, "2200N" => InvalidXmlContent, "2200S" => InvalidXmlComment, "2200T" => InvalidXmlProcessingInstruction, // Class 23 — Integrity Constraint Violation "23000" => IntegrityConstraintViolation, "23001" => RestrictViolation, "23502" => NotNullViolation, "23503" => ForeignKeyViolation, "23505" => UniqueViolation, "23514" => CheckViolation, "32P01" => ExclusionViolation, // Class 24 — Invalid Cursor State "24000" => InvalidCursorState, // Class 25 — Invalid Transaction State "25000" => InvalidTransactionState, "25001" => ActiveSqlTransaction, "25002" => BranchTransactionAlreadyActive, "25008" => HeldCursorRequiresSameIsolationLevel, "25003" => InappropriateAccessModeForBranchTransaction, "25004" => InappropriateIsolationLevelForBranchTransaction, "25005" => NoActiveSqlTransactionForBranchTransaction, "25006" => ReadOnlySqlTransaction, "25007" => SchemaAndDataStatementMixingNotSupported, "25P01" => NoActiveSqlTransaction, "25P02" => InFailedSqlTransaction, // Class 26 — Invalid SQL Statement Name "26000" => InvalidSqlStatementName, // Class 27 — Triggered Data Change Violation "27000" => TriggeredDataChangeViolation, // Class 28 — Invalid Authorization Specification "28000" => InvalidAuthorizationSpecification, "28P01" => InvalidPassword, // Class 2B — Dependent Privilege Descriptors Still Exist "2B000" => DependentPrivilegeDescriptorsStillExist, "2BP01" => DependentObjectsStillExist, // Class 2D — Invalid Transaction Termination "2D000" => InvalidTransactionTermination, // Class 2F — SQL Routine Exception "2F000" => SqlRoutineException, "2F005" => FunctionExecutedNoReturnStatement,
"2F003" => ProhibitedSqlStatementAttemptedSqlRoutine, "2F004" => ReadingSqlDataNotPermittedSqlRoutine, // Class 34 — Invalid Cursor Name "34000" => InvalidCursorName, // Class 38 — External Routine Exception "38000" => ExternalRoutineException, "38001" => ContainingSqlNotPermitted, "38002" => ModifyingSqlDataNotPermittedExternalRoutine, "38003" => ProhibitedSqlStatementAttemptedExternalRoutine, "38004" => ReadingSqlDataNotPermittedExternalRoutine, // Class 39 — External Routine Invocation Exception "39000" => ExternalRoutineInvocationException, "39001" => InvalidSqlstateReturned, "39004" => NullValueNotAllowedExternalRoutine, "39P01" => TriggerProtocolViolated, "39P02" => SrfProtocolViolated, // Class 3B — Savepoint Exception "3B000" => SavepointException, "3B001" => InvalidSavepointException, // Class 3D — Invalid Catalog Name "3D000" => InvalidCatalogName, // Class 3F — Invalid Schema Name "3F000" => InvalidSchemaName, // Class 40 — Transaction Rollback "40000" => TransactionRollback, "40002" => TransactionIntegrityConstraintViolation, "40001" => SerializationFailure, "40003" => StatementCompletionUnknown, "40P01" => DeadlockDetected, // Class 42 — Syntax Error or Access Rule Violation "42000" => SyntaxErrorOrAccessRuleViolation, "42601" => SyntaxError, "42501" => InsufficientPrivilege, "42846" => CannotCoerce, "42803" => GroupingError, "42P20" => WindowingError, "42P19" => InvalidRecursion, "42830" => InvalidForeignKey, "42602" => InvalidName, "42622" => NameTooLong, "42939" => ReservedName, "42804" => DatatypeMismatch, "42P18" => IndeterminateDatatype, "42P21" => CollationMismatch, "42P22" => IndeterminateCollation, "42809" => WrongObjectType, "42703" => UndefinedColumn, "42883" => UndefinedFunction, "42P01" => UndefinedTable, "42P02" => UndefinedParameter, "42704" => UndefinedObject, "42701" => DuplicateColumn, "42P03" => DuplicateCursor, "42P04" => DuplicateDatabase, "42723" => DuplicateFunction, "42P05" => DuplicatePreparedStatement, "42P06" => DuplicateSchema, "42P07" => DuplicateTable, "42712" => DuplicateAliaas, "42710" => DuplicateObject, "42702" => AmbiguousColumn, "42725" => AmbiguousFunction, "42P08" => AmbiguousParameter, "42P09" => AmbiguousAlias, "42P10" => InvalidColumnReference, "42611" => InvalidColumnDefinition, "42P11" => InvalidCursorDefinition, "42P12" => InvalidDatabaseDefinition, "42P13" => InvalidFunctionDefinition, "42P14" => InvalidPreparedStatementDefinition, "42P15" => InvalidSchemaDefinition, "42P16" => InvalidTableDefinition, "42P17" => InvalidObjectDefinition, // Class 44 — WITH CHECK OPTION Violation "44000" => WithCheckOptionViolation, // Class 53 — Insufficient Resources "53000" => InsufficientResources, "53100" => DiskFull, "53200" => OutOfMemory, "53300" => TooManyConnections, "53400" => ConfigurationLimitExceeded, // Class 54 — Program Limit Exceeded "54000" => ProgramLimitExceeded, "54001" => StatementTooComplex, "54011" => TooManyColumns, "54023" => TooManyArguments, // Class 55 — Object Not In Prerequisite State "55000" => ObjectNotInPrerequisiteState, "55006" => ObjectInUse, "55P02" => CantChangeRuntimeParam, "55P03" => LockNotAvailable, // Class 57 — Operator Intervention "57000" => OperatorIntervention, "57014" => QueryCanceled, "57P01" => AdminShutdown, "57P02" => CrashShutdown, "57P03" => CannotConnectNow, "57P04" => DatabaseDropped, // Class 58 — System Error "58000" => SystemError, "58030" => IoError, "58P01" => UndefinedFile, "58P02" => DuplicateFile, // Class F0 — Configuration File Error "F0000" => ConfigFileError, "F0001" => 
LockFileExists,

    // Class HV — Foreign Data Wrapper Error (SQL/MED)
    "HV000" => FdwError,
    "HV005" => FdwColumnNameNotFound,
    "HV002" => FdwDynamicParameterValueNeeded,
    "HV010" => FdwFunctionSequenceError,
    "HV021" => FdwInconsistentDescriptorInformation,
    "HV024" => FdwInvalidAttributeValue,
    "HV007" => FdwInvalidColumnName,
    "HV008" => FdwInvalidColumnNumber,
    "HV004" => FdwInvalidDataType,
    "HV006" => FdwInvalidDataTypeDescriptors,
    "HV091" => FdwInvalidDescriptorFieldIdentifier,
    "HV00B" => FdwInvalidHandle,
    "HV00C" => FdwInvalidOptionIndex,
    "HV00D" => FdwInvalidOptionName,
    "HV090" => FdwInvalidStringLengthOrBufferLength,
    "HV00A" => FdwInvalidStringFormat,
    "HV009" => FdwInvalidUseOfNullPointer,
    "HV014" => FdwTooManyHandles,
    "HV001" => FdwOutOfMemory,
    "HV00P" => FdwNoSchemas,
    "HV00J" => FdwOptionNameNotFound,
    "HV00K" => FdwReplyHandle,
    "HV00Q" => FdwSchemaNotFound,
    "HV00R" => FdwTableNotFound,
    "HV00L" => FdwUnableToCreateExecution,
    "HV00M" => FdwUnableToCreateReply,
    "HV00N" => FdwUnableToEstablishConnection,

    // Class P0 — PL/pgSQL Error
    "P0000" => PlpgsqlError,
    "P0001" => RaiseException,
    "P0002" => NoDataFound,
    "P0003" => TooManyRows,

    // Class XX — Internal Error
    "XX000" => InternalError,
    "XX001" => DataCorrupted,
    "XX002" => IndexCorrupted
)

/// Reasons a new Postgres connection could fail
#[deriving(Clone, PartialEq, Eq)]
pub enum PostgresConnectError {
    /// The provided URL could not be parsed
    InvalidUrl(String),
    /// The URL was missing a user
    MissingUser,
    /// There was an error opening a socket to the server
    SocketError(io::IoError),
    /// An error from the Postgres server itself
    PgConnectDbError(PostgresDbError),
    /// A password was required but not provided in the URL
    MissingPassword,
    /// The Postgres server requested an authentication method not supported
    /// by the driver
    UnsupportedAuthentication,
    /// The Postgres server does not support SSL encryption
    NoSslSupport,
    /// There was an error initializing the SSL session
    SslError(error::SslError),
    /// There was an error communicating with the server
    PgConnectStreamError(io::IoError),
    /// The server sent an unexpected response
    PgConnectBadResponse,
}

impl fmt::Show for PostgresConnectError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            InvalidUrl(ref err) => write!(fmt, "Invalid URL: {}", err),
            MissingUser => write!(fmt, "User missing in URL"),
            SocketError(ref err) =>
                write!(fmt, "Unable to open connection to server: {}", err),
            PgConnectDbError(ref err) => err.fmt(fmt),
            MissingPassword =>
                write!(fmt, "The server requested a password but none was provided"),
            UnsupportedAuthentication =>
                write!(fmt, "The server requested an unsupported authentication method"),
            NoSslSupport => write!(fmt, "The server does not support SSL"),
            SslError(ref err) =>
                write!(fmt, "Error initiating SSL session: {}", err),
            PgConnectStreamError(ref err) =>
                write!(fmt, "Error communicating with server: {}", err),
            PgConnectBadResponse =>
                write!(fmt, "The server returned an unexpected response"),
        }
    }
}

/// Represents the position of an error in a query
#[deriving(Clone, PartialEq, Eq)]
pub enum PostgresErrorPosition {
    /// A position in the original query
    Position(uint),
    /// A position in an internally generated query
    InternalPosition {
        /// The byte position
        pub position: uint,
        /// A query generated by the Postgres server
        pub query: String
    }
}

/// Encapsulates a Postgres error or notice.
#[deriving(Clone, PartialEq, Eq)] pub struct PostgresDbError { /// The field contents are ERROR, FATAL, or PANIC (in an error message), /// or WARNING, NOTICE, DEBUG, INFO, or LOG (in a notice message), or a /// localized translation of one of these. pub severity: String, /// The SQLSTATE code for the error. pub code: PostgresSqlState, /// The primary human-readable error message. This should be accurate but /// terse (typically one line). pub message: String, /// An optional secondary error message carrying more detail about the /// problem. Might run to multiple lines. pub detail: Option<String>, /// An optional suggestion what to do about the problem. This is intended /// to differ from Detail in that it offers advice (potentially /// inappropriate) rather than hard facts. Might run to multiple lines. pub hint: Option<String>, /// An optional error cursor position into either the original query string /// or an internally generated query. pub position: Option<PostgresErrorPosition>, /// An indication of the context in which the error occurred. Presently /// this includes a call stack traceback of active procedural language /// functions and internally-generated queries. The trace is one entry per /// line, most recent first. pub where_: Option<String>, /// If the error was associated with a specific database object, the name /// of the schema containing that object, if any. (PostgreSQL 9.3+) pub schema: Option<String>, /// If the error was associated with a specific table, the name of the /// table. (Refer to the schema name field for the name of the table's /// schema.) (PostgreSQL 9.3+) pub table: Option<String>, /// If the error was associated with a specific table column, the name of /// the column. (Refer to the schema and table name fields to identify the /// table.) (PostgreSQL 9.3+) pub column: Option<String>, /// If the error was associated with a specific data type, the name of the /// data type. (Refer to the schema name field for the name of the data /// type's schema.) (PostgreSQL 9.3+) pub datatype: Option<String>, /// If the error was associated with a specific constraint, the name of the /// constraint. Refer to fields listed above for the associated table or /// domain. (For this purpose, indexes are treated as constraints, even if /// they weren't created with constraint syntax.) (PostgreSQL 9.3+) pub constraint: Option<String>, /// The file name of the source-code location where the error was reported. pub file: String, /// The line number of the source-code location where the error was /// reported. pub line: uint, /// The name of the source-code routine reporting the error. 
pub routine: String } impl PostgresDbError { #[doc(hidden)] pub fn new_raw(fields: Vec<(u8, String)>) -> Result<PostgresDbError, ()> { let mut map: HashMap<_, _> = fields.into_iter().collect(); Ok(PostgresDbError { severity: try!(map.pop(&b'S').ok_or(())), code: PostgresSqlState::from_code(try!(map.pop(&b'C').ok_or(()))[]), message: try!(map.pop(&b'M').ok_or(())), detail: map.pop(&b'D'), hint: map.pop(&b'H'), position: match map.pop(&b'P') { Some(pos) => Some(Position(try!(from_str(pos[]).ok_or(())))), None => match map.pop(&b'p') { Some(pos) => Some(InternalPosition { position: try!(from_str(pos[]).ok_or(())), query: try!(map.pop(&b'q').ok_or(())) }), None => None } }, where_: map.pop(&b'W'), schema: map.pop(&b's'), table: map.pop(&b't'), column: map.pop(&b'c'), datatype: map.pop(&b'd'), constraint: map.pop(&b'n'), file: try!(map.pop(&b'F').ok_or(())), line: try!(map.pop(&b'L').and_then(|l| from_str(l[])).ok_or(())), routine: try!(map.pop(&b'R').ok_or(())), }) } #[doc(hidden)] pub fn new_connect<T>(fields: Vec<(u8, String)>) -> Result<T, PostgresConnectError> { match PostgresDbError::new_raw(fields) { Ok(err) => Err(PgConnectDbError(err)), Err(()) => Err(PgConnectBadResponse), } } #[doc(hidden)] pub fn new<T>(fields: Vec<(u8, String)>) -> PostgresResult<T> { match PostgresDbError::new_raw(fields) { Ok(err) => Err(PgDbError(err)), Err(()) => Err(PgBadData), } } } impl fmt::Show for PostgresDbError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message) } } /// An error encountered when communicating with the Postgres server #[deriving(Clone, PartialEq, Eq)] pub enum PostgresError { /// An error reported by the Postgres server PgDbError(PostgresDbError), /// An error communicating with the Postgres server PgStreamError(io::IoError), /// The communication channel with the Postgres server has desynchronized /// due to an earlier communications error. 
PgStreamDesynchronized, /// A prepared statement was executed on a connection it does not belong to PgWrongConnection, /// An incorrect number of parameters were bound to a statement PgWrongParamCount { /// The expected number of parameters pub expected: uint, /// The actual number of parameters pub actual: uint, }, /// An attempt was made to convert between incompatible Rust and Postgres /// types PgWrongType(PostgresType), /// An attempt was made to read from a column that does not exist PgInvalidColumn, /// A value was NULL but converted to a non-nullable Rust type PgWasNull, /// An attempt was made to prepare a statement or start a transaction on an /// object other than the active transaction PgWrongTransaction, /// The server returned an unexpected response PgBadResponse, /// The server provided data that the client could not parse PgBadData, } impl fmt::Show for PostgresError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match *self { PgDbError(ref err) => err.fmt(fmt), PgStreamError(ref err) => err.fmt(fmt), PgStreamDesynchronized => write!(fmt, "Communication with the server has desynchronized due to an earlier \ IO error"), PgWrongConnection => write!(fmt, "A statement was executed with a connection it was not prepared with"), PgWrongParamCount { expected, actual } => write!(fmt, "Expected {} parameters but got {}", expected, actual), PgWrongType(ref ty) => write!(fmt, "Unexpected type {}", ty), PgInvalidColumn => write!(fmt, "Invalid column"), PgWasNull => write!(fmt, "The value was NULL"), PgWrongTransaction => write!(fmt, "An attempt was made to prepare a statement or start a transaction on \ an object other than the active transaction"), PgBadResponse => write!(fmt, "The server returned an unexpected response"), PgBadData => write!(fmt, "The server provided data that the client could not parse"), } } }
"2F002" => ModifyingSqlDataNotPermittedSqlRoutine,
random_line_split
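`PostgresDbError::new_raw` in this record's suffix decodes the backend's tagged `(u8, String)` error fields, requiring 'S' (severity), 'C' (code) and 'M' (message). A modern-Rust restatement of that required-field handling (`HashMap::remove` standing in for the era's `pop`; the function name is mine):

```rust
use std::collections::HashMap;

// Sketch of the wire-field decoding done by PostgresDbError::new_raw:
// the backend sends (tag, value) pairs; 'S', 'C' and 'M' are mandatory.
fn parse_error(fields: Vec<(u8, String)>) -> Result<(String, String, String), ()> {
    let mut map: HashMap<u8, String> = fields.into_iter().collect();
    let severity = map.remove(&b'S').ok_or(())?;
    let code = map.remove(&b'C').ok_or(())?;
    let message = map.remove(&b'M').ok_or(())?;
    Ok((severity, code, message))
}

fn main() {
    let fields = vec![
        (b'S', "ERROR".to_string()),
        (b'C', "23505".to_string()),
        (b'M', "duplicate key value".to_string()),
    ];
    let (sev, code, msg) = parse_error(fields).unwrap();
    println!("{sev} ({code}): {msg}");
}
```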
error.rs
//! Postgres errors

use std::collections::HashMap;
use std::io;
use std::fmt;

use openssl::ssl::error;
use phf::PhfMap;

use PostgresResult;
use types::PostgresType;

macro_rules! make_errors(
    ($($code:expr => $error:ident),+) => (
        /// SQLSTATE error codes
        #[deriving(PartialEq, Eq, Clone, Show)]
        #[allow(missing_doc)]
        pub enum PostgresSqlState {
            $($error,)+
            UnknownSqlState(String)
        }

        static STATE_MAP: PhfMap<&'static str, PostgresSqlState> = phf_map!(
            $($code => $error),+
        );

        impl PostgresSqlState {
            #[doc(hidden)]
            pub fn from_code(s: &str) -> PostgresSqlState {
                match STATE_MAP.find_equiv(&s) {
                    Some(state) => state.clone(),
                    None => UnknownSqlState(s.into_string())
                }
            }
        }
    )
)

// From http://www.postgresql.org/docs/9.2/static/errcodes-appendix.html
make_errors!(
    // Class 00 — Successful Completion
    "00000" => SuccessfulCompletion,

    // Class 01 — Warning
    "01000" => Warning,
    "0100C" => DynamicResultSetsReturned,
    "01008" => ImplicitZeroBitPadding,
    "01003" => NullValueEliminatedInSetFunction,
    "01007" => PrivilegeNotGranted,
    "01006" => PrivilegeNotRevoked,
    "01004" => StringDataRightTruncationWarning,
    "01P01" => DeprecatedFeature,

    // Class 02 — No Data
    "02000" => NoData,
    "02001" => NoAdditionalDynamicResultSetsReturned,

    // Class 03 — SQL Statement Not Yet Complete
    "03000" => SqlStatementNotYetComplete,

    // Class 08 — Connection Exception
    "08000" => ConnectionException,
    "08003" => ConnectionDoesNotExist,
    "08006" => ConnectionFailure,
    "08001" => SqlclientUnableToEstablishSqlconnection,
    "08004" => SqlserverRejectedEstablishmentOfSqlconnection,
    "08007" => TransactionResolutionUnknown,
    "08P01" => ProtocolViolation,

    // Class 09 — Triggered Action Exception
    "09000" => TriggeredActionException,

    // Class 0A — Feature Not Supported
    "0A000" => FeatureNotSupported,

    // Class 0B — Invalid Transaction Initiation
    "0B000" => InvalidTransactionInitiation,

    // Class 0F — Locator Exception
    "0F000" => LocatorException,
    "0F001" => InvalidLocatorException,

    // Class 0L — Invalid Grantor
    "0L000" => InvalidGrantor,
    "0LP01" => InvalidGrantOperation,

    // Class 0P — Invalid Role Specification
    "0P000" => InvalidRoleSpecification,

    // Class 0Z — Diagnostics Exception
    "0Z000" => DiagnosticsException,
    "0Z002" => StackedDiagnosticsAccessedWithoutActiveHandler,

    // Class 20 — Case Not Found
    "20000" => CaseNotFound,

    // Class 21 — Cardinality Violation
    "21000" => CardinalityViolation,

    // Class 22 — Data Exception
    "22000" => DataException,
    "2202E" => ArraySubscriptError,
    "22021" => CharacterNotInRepertoire,
    "22008" => DatetimeFieldOverflow,
    "22012" => DivisionByZero,
    "22005" => ErrorInAssignment,
    "2200B" => EscapeCharacterConflict,
    "22022" => IndicatorOverflow,
    "22015" => IntervalFieldOverflow,
    "2201E" => InvalidArgumentForLogarithm,
    "22014" => InvalidArgumentForNtileFunction,
    "22016" => InvalidArgumentForNthValueFunction,
    "2201F" => InvalidArgumentForPowerFunction,
    "2201G" => InvalidArgumentForWidthBucketFunction,
    "22018" => InvalidCharacterValueForCast,
    "22007" => InvalidDatetimeFormat,
    "22019" => InvalidEscapeCharacter,
    "2200D" => InvalidEscapeOctet,
    "22025" => InvalidEscapeSequence,
    "22P06" => NonstandardUseOfEscapeCharacter,
    "22010" => InvalidIndicatorParameterValue,
    "22023" => InvalidParameterValue,
    "2201B" => InvalidRegularExpression,
    "2201W" => InvalidRowCountInLimitClause,
    "2201X" => InvalidRowCountInResultOffsetClause,
    "22009" => InvalidTimeZoneDisplacementValue,
    "2200C" => InvalidUseOfEscapeCharacter,
    "2200G" => MostSpecificTypeMismatch,
    "22004" => NullValueNotAllowedData,
    "22002" => NullValueNoIndicatorParameter,
    "22003" => NumericValueOutOfRange,
    "22026" => StringDataLengthMismatch,
    "22001" => StringDataRightTruncationException,
    "22011" => SubstringError,
    "22027" => TrimError,
    "22024" => UnterminatedCString,
    "2200F" => ZeroLengthCharacterString,
    "22P01" => FloatingPointException,
    "22P02" => InvalidTextRepresentation,
    "22P03" => InvalidBinaryRepresentation,
    "22P04" => BadCopyFileFormat,
    "22P05" => UntranslatableCharacter,
    "2200L" => NotAnXmlDocument,
    "2200M" => InvalidXmlDocument,
    "2200N" => InvalidXmlContent,
    "2200S" => InvalidXmlComment,
    "2200T" => InvalidXmlProcessingInstruction,

    // Class 23 — Integrity Constraint Violation
    "23000" => IntegrityConstraintViolation,
    "23001" => RestrictViolation,
    "23502" => NotNullViolation,
    "23503" => ForeignKeyViolation,
    "23505" => UniqueViolation,
    "23514" => CheckViolation,
    "23P01" => ExclusionViolation,

    // Class 24 — Invalid Cursor State
    "24000" => InvalidCursorState,

    // Class 25 — Invalid Transaction State
    "25000" => InvalidTransactionState,
    "25001" => ActiveSqlTransaction,
    "25002" => BranchTransactionAlreadyActive,
    "25008" => HeldCursorRequiresSameIsolationLevel,
    "25003" => InappropriateAccessModeForBranchTransaction,
    "25004" => InappropriateIsolationLevelForBranchTransaction,
    "25005" => NoActiveSqlTransactionForBranchTransaction,
    "25006" => ReadOnlySqlTransaction,
    "25007" => SchemaAndDataStatementMixingNotSupported,
    "25P01" => NoActiveSqlTransaction,
    "25P02" => InFailedSqlTransaction,

    // Class 26 — Invalid SQL Statement Name
    "26000" => InvalidSqlStatementName,

    // Class 27 — Triggered Data Change Violation
    "27000" => TriggeredDataChangeViolation,

    // Class 28 — Invalid Authorization Specification
    "28000" => InvalidAuthorizationSpecification,
    "28P01" => InvalidPassword,

    // Class 2B — Dependent Privilege Descriptors Still Exist
    "2B000" => DependentPrivilegeDescriptorsStillExist,
    "2BP01" => DependentObjectsStillExist,

    // Class 2D — Invalid Transaction Termination
    "2D000" => InvalidTransactionTermination,

    // Class 2F — SQL Routine Exception
    "2F000" => SqlRoutineException,
    "2F005" => FunctionExecutedNoReturnStatement,
    "2F002" => ModifyingSqlDataNotPermittedSqlRoutine,
    "2F003" => ProhibitedSqlStatementAttemptedSqlRoutine,
    "2F004" => ReadingSqlDataNotPermittedSqlRoutine,

    // Class 34 — Invalid Cursor Name
    "34000" => InvalidCursorName,

    // Class 38 — External Routine Exception
    "38000" => ExternalRoutineException,
    "38001" => ContainingSqlNotPermitted,
    "38002" => ModifyingSqlDataNotPermittedExternalRoutine,
    "38003" => ProhibitedSqlStatementAttemptedExternalRoutine,
    "38004" => ReadingSqlDataNotPermittedExternalRoutine,

    // Class 39 — External Routine Invocation Exception
    "39000" => ExternalRoutineInvocationException,
    "39001" => InvalidSqlstateReturned,
    "39004" => NullValueNotAllowedExternalRoutine,
    "39P01" => TriggerProtocolViolated,
    "39P02" => SrfProtocolViolated,

    // Class 3B — Savepoint Exception
    "3B000" => SavepointException,
    "3B001" => InvalidSavepointException,

    // Class 3D — Invalid Catalog Name
    "3D000" => InvalidCatalogName,

    // Class 3F — Invalid Schema Name
    "3F000" => InvalidSchemaName,

    // Class 40 — Transaction Rollback
    "40000" => TransactionRollback,
    "40002" => TransactionIntegrityConstraintViolation,
    "40001" => SerializationFailure,
    "40003" => StatementCompletionUnknown,
    "40P01" => DeadlockDetected,

    // Class 42 — Syntax Error or Access Rule Violation
    "42000" => SyntaxErrorOrAccessRuleViolation,
    "42601" => SyntaxError,
    "42501" => InsufficientPrivilege,
    "42846" => CannotCoerce,
    "42803" => GroupingError,
    "42P20" => WindowingError,
    "42P19" => InvalidRecursion,
    "42830" => InvalidForeignKey,
    "42602" => InvalidName,
    "42622" => NameTooLong,
    "42939" => ReservedName,
    "42804" => DatatypeMismatch,
    "42P18" => IndeterminateDatatype,
    "42P21" => CollationMismatch,
    "42P22" => IndeterminateCollation,
    "42809" => WrongObjectType,
    "42703" => UndefinedColumn,
    "42883" => UndefinedFunction,
    "42P01" => UndefinedTable,
    "42P02" => UndefinedParameter,
    "42704" => UndefinedObject,
    "42701" => DuplicateColumn,
    "42P03" => DuplicateCursor,
    "42P04" => DuplicateDatabase,
    "42723" => DuplicateFunction,
    "42P05" => DuplicatePreparedStatement,
    "42P06" => DuplicateSchema,
    "42P07" => DuplicateTable,
    "42712" => DuplicateAlias,
    "42710" => DuplicateObject,
    "42702" => AmbiguousColumn,
    "42725" => AmbiguousFunction,
    "42P08" => AmbiguousParameter,
    "42P09" => AmbiguousAlias,
    "42P10" => InvalidColumnReference,
    "42611" => InvalidColumnDefinition,
    "42P11" => InvalidCursorDefinition,
    "42P12" => InvalidDatabaseDefinition,
    "42P13" => InvalidFunctionDefinition,
    "42P14" => InvalidPreparedStatementDefinition,
    "42P15" => InvalidSchemaDefinition,
    "42P16" => InvalidTableDefinition,
    "42P17" => InvalidObjectDefinition,

    // Class 44 — WITH CHECK OPTION Violation
    "44000" => WithCheckOptionViolation,

    // Class 53 — Insufficient Resources
    "53000" => InsufficientResources,
    "53100" => DiskFull,
    "53200" => OutOfMemory,
    "53300" => TooManyConnections,
    "53400" => ConfigurationLimitExceeded,

    // Class 54 — Program Limit Exceeded
    "54000" => ProgramLimitExceeded,
    "54001" => StatementTooComplex,
    "54011" => TooManyColumns,
    "54023" => TooManyArguments,

    // Class 55 — Object Not In Prerequisite State
    "55000" => ObjectNotInPrerequisiteState,
    "55006" => ObjectInUse,
    "55P02" => CantChangeRuntimeParam,
    "55P03" => LockNotAvailable,

    // Class 57 — Operator Intervention
    "57000" => OperatorIntervention,
    "57014" => QueryCanceled,
    "57P01" => AdminShutdown,
    "57P02" => CrashShutdown,
    "57P03" => CannotConnectNow,
    "57P04" => DatabaseDropped,

    // Class 58 — System Error
    "58000" => SystemError,
    "58030" => IoError,
    "58P01" => UndefinedFile,
    "58P02" => DuplicateFile,

    // Class F0 — Configuration File Error
    "F0000" => ConfigFileError,
    "F0001" => LockFileExists,

    // Class HV — Foreign Data Wrapper Error (SQL/MED)
    "HV000" => FdwError,
    "HV005" => FdwColumnNameNotFound,
    "HV002" => FdwDynamicParameterValueNeeded,
    "HV010" => FdwFunctionSequenceError,
    "HV021" => FdwInconsistentDescriptorInformation,
    "HV024" => FdwInvalidAttributeValue,
    "HV007" => FdwInvalidColumnName,
    "HV008" => FdwInvalidColumnNumber,
    "HV004" => FdwInvalidDataType,
    "HV006" => FdwInvalidDataTypeDescriptors,
    "HV091" => FdwInvalidDescriptorFieldIdentifier,
    "HV00B" => FdwInvalidHandle,
    "HV00C" => FdwInvalidOptionIndex,
    "HV00D" => FdwInvalidOptionName,
    "HV090" => FdwInvalidStringLengthOrBufferLength,
    "HV00A" => FdwInvalidStringFormat,
    "HV009" => FdwInvalidUseOfNullPointer,
    "HV014" => FdwTooManyHandles,
    "HV001" => FdwOutOfMemory,
    "HV00P" => FdwNoSchemas,
    "HV00J" => FdwOptionNameNotFound,
    "HV00K" => FdwReplyHandle,
    "HV00Q" => FdwSchemaNotFound,
    "HV00R" => FdwTableNotFound,
    "HV00L" => FdwUnableToCreateExecution,
    "HV00M" => FdwUnableToCreateReply,
    "HV00N" => FdwUnableToEstablishConnection,

    // Class P0 — PL/pgSQL Error
    "P0000" => PlpgsqlError,
    "P0001" => RaiseException,
    "P0002" => NoDataFound,
    "P0003" => TooManyRows,

    // Class XX — Internal Error
    "XX000" => InternalError,
    "XX001" => DataCorrupted,
    "XX002" => IndexCorrupted
)

/// Reasons a new Postgres connection could fail
#[deriving(Clone, PartialEq, Eq)]
pub enum PostgresConnectError {
    /// The provided URL could not be parsed
    InvalidUrl(String),
    /// The URL was missing a user
    MissingUser,
    /// There was an error opening a socket to the server
    SocketError(io::IoError),
    /// An error from the Postgres server itself
    PgConnectDbError(PostgresDbError),
    /// A password was required but not provided in the URL
    MissingPassword,
    /// The Postgres server requested an authentication method not supported
    /// by the driver
    UnsupportedAuthentication,
    /// The Postgres server does not support SSL encryption
    NoSslSupport,
    /// There was an error initializing the SSL session
    SslError(error::SslError),
    /// There was an error communicating with the server
    PgConnectStreamError(io::IoError),
    /// The server sent an unexpected response
    PgConnectBadResponse,
}

impl fmt::Show for PostgresConnectError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            InvalidUrl(ref err) => write!(fmt, "Invalid URL: {}", err),
            MissingUser => write!(fmt, "User missing in URL"),
            SocketError(ref err) =>
                write!(fmt, "Unable to open connection to server: {}", err),
            PgConnectDbError(ref err) => err.fmt(fmt),
            MissingPassword =>
                write!(fmt, "The server requested a password but none was provided"),
            UnsupportedAuthentication =>
                write!(fmt, "The server requested an unsupported authentication method"),
            NoSslSupport => write!(fmt, "The server does not support SSL"),
            SslError(ref err) => write!(fmt, "Error initiating SSL session: {}", err),
            PgConnectStreamError(ref err) =>
                write!(fmt, "Error communicating with server: {}", err),
            PgConnectBadResponse =>
                write!(fmt, "The server returned an unexpected response"),
        }
    }
}

/// Represents the position of an error in a query
#[deriving(Clone, PartialEq, Eq)]
pub enum PostgresErrorPosition {
    /// A position in the original query
    Position(uint),
    /// A position in an internally generated query
    InternalPosition {
        /// The byte position
        pub position: uint,
        /// A query generated by the Postgres server
        pub query: String
    }
}

/// Encapsulates a Postgres error or notice.
#[deriving(Clone, PartialEq, Eq)]
pub struct PostgresDbError {
    /// The field contents are ERROR, FATAL, or PANIC (in an error message),
    /// or WARNING, NOTICE, DEBUG, INFO, or LOG (in a notice message), or a
    /// localized translation of one of these.
    pub severity: String,
    /// The SQLSTATE code for the error.
    pub code: PostgresSqlState,
    /// The primary human-readable error message. This should be accurate but
    /// terse (typically one line).
    pub message: String,
    /// An optional secondary error message carrying more detail about the
    /// problem. Might run to multiple lines.
    pub detail: Option<String>,
    /// An optional suggestion what to do about the problem. This is intended
    /// to differ from Detail in that it offers advice (potentially
    /// inappropriate) rather than hard facts. Might run to multiple lines.
    pub hint: Option<String>,
    /// An optional error cursor position into either the original query string
    /// or an internally generated query.
    pub position: Option<PostgresErrorPosition>,
    /// An indication of the context in which the error occurred. Presently
    /// this includes a call stack traceback of active procedural language
    /// functions and internally-generated queries. The trace is one entry per
    /// line, most recent first.
    pub where_: Option<String>,
    /// If the error was associated with a specific database object, the name
    /// of the schema containing that object, if any. (PostgreSQL 9.3+)
    pub schema: Option<String>,
    /// If the error was associated with a specific table, the name of the
    /// table. (Refer to the schema name field for the name of the table's
    /// schema.) (PostgreSQL 9.3+)
    pub table: Option<String>,
    /// If the error was associated with a specific table column, the name of
    /// the column. (Refer to the schema and table name fields to identify the
    /// table.) (PostgreSQL 9.3+)
    pub column: Option<String>,
    /// If the error was associated with a specific data type, the name of the
    /// data type. (Refer to the schema name field for the name of the data
    /// type's schema.) (PostgreSQL 9.3+)
    pub datatype: Option<String>,
    /// If the error was associated with a specific constraint, the name of the
    /// constraint. Refer to fields listed above for the associated table or
    /// domain. (For this purpose, indexes are treated as constraints, even if
    /// they weren't created with constraint syntax.) (PostgreSQL 9.3+)
    pub constraint: Option<String>,
    /// The file name of the source-code location where the error was reported.
    pub file: String,
    /// The line number of the source-code location where the error was
    /// reported.
    pub line: uint,
    /// The name of the source-code routine reporting the error.
    pub routine: String
}

impl PostgresDbError {
    #[doc(hidden)]
    pub fn new_raw(fields: Vec<(u8, String)>) -> Result<PostgresDbError, ()> {
        let mut map: HashMap<_, _> = fields.into_iter().collect();
        Ok(PostgresDbError {
            severity: try!(map.pop(&b'S').ok_or(())),
            code: PostgresSqlState::from_code(try!(map.pop(&b'C').ok_or(()))[]),
            message: try!(map.pop(&b'M').ok_or(())),
            detail: map.pop(&b'D'),
            hint: map.pop(&b'H'),
            position: match map.pop(&b'P') {
                Some(pos) => Some(Position(try!(from_str(pos[]).ok_or(())))),
                None => match map.pop(&b'p') {
                    Some(pos) => Some(InternalPosition {
                        position: try!(from_str(pos[]).ok_or(())),
                        query: try!(map.pop(&b'q').ok_or(()))
                    }),
                    None => None
                }
            },
            where_: map.pop(&b'W'),
            schema: map.pop(&b's'),
            table: map.pop(&b't'),
            column: map.pop(&b'c'),
            datatype: map.pop(&b'd'),
            constraint: map.pop(&b'n'),
            file: try!(map.pop(&b'F').ok_or(())),
            line: try!(map.pop(&b'L').and_then(|l| from_str(l[])).ok_or(())),
            routine: try!(map.pop(&b'R').ok_or(())),
        })
    }

    #[doc(hidden)]
    pub fn new_connect<T>(fields: Vec<(u8, String)>) -> Result<T, PostgresConnectError> {
        match PostgresDbError::new_raw(fields) {
            Ok(err) => Err(PgConnectDbError(err)),
            Err(()) => Err(PgConnectBadResponse),
        }
    }

    #[doc(hidden)]
    pub fn new<T>(fields: Vec<(u8, String)>) -> PostgresResult<T> {
        match PostgresDbError::new_raw(fields) {
            Ok(err) => Err(PgDbError(err)),
            Err(()) => Err(PgBadData),
        }
    }
}

impl fmt::Show for PostgresDbError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}: {}", self.severity, self.message)
    }
}

/// An error e
Clone, PartialEq, Eq)]
pub enum PostgresError {
    /// An error reported by the Postgres server
    PgDbError(PostgresDbError),
    /// An error communicating with the Postgres server
    PgStreamError(io::IoError),
    /// The communication channel with the Postgres server has desynchronized
    /// due to an earlier communications error.
    PgStreamDesynchronized,
    /// A prepared statement was executed on a connection it does not belong to
    PgWrongConnection,
    /// An incorrect number of parameters were bound to a statement
    PgWrongParamCount {
        /// The expected number of parameters
        pub expected: uint,
        /// The actual number of parameters
        pub actual: uint,
    },
    /// An attempt was made to convert between incompatible Rust and Postgres
    /// types
    PgWrongType(PostgresType),
    /// An attempt was made to read from a column that does not exist
    PgInvalidColumn,
    /// A value was NULL but converted to a non-nullable Rust type
    PgWasNull,
    /// An attempt was made to prepare a statement or start a transaction on an
    /// object other than the active transaction
    PgWrongTransaction,
    /// The server returned an unexpected response
    PgBadResponse,
    /// The server provided data that the client could not parse
    PgBadData,
}

impl fmt::Show for PostgresError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            PgDbError(ref err) => err.fmt(fmt),
            PgStreamError(ref err) => err.fmt(fmt),
            PgStreamDesynchronized =>
                write!(fmt, "Communication with the server has desynchronized due to an earlier \
                    IO error"),
            PgWrongConnection =>
                write!(fmt, "A statement was executed with a connection it was not prepared with"),
            PgWrongParamCount { expected, actual } =>
                write!(fmt, "Expected {} parameters but got {}", expected, actual),
            PgWrongType(ref ty) => write!(fmt, "Unexpected type {}", ty),
            PgInvalidColumn => write!(fmt, "Invalid column"),
            PgWasNull => write!(fmt, "The value was NULL"),
            PgWrongTransaction =>
                write!(fmt, "An attempt was made to prepare a statement or start a transaction on \
                    an object other than the active transaction"),
            PgBadResponse =>
                write!(fmt, "The server returned an unexpected response"),
            PgBadData =>
                write!(fmt, "The server provided data that the client could not parse"),
        }
    }
}
ncountered when communicating with the Postgres server
#[deriving(
identifier_body
main.rs
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#![warn(clippy::all)]

mod client;
mod error_reporting;
mod lsp;
mod server;

use lsp_server::Connection;
use std::error::Error;

use env_logger::Env;
use log::info;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Sync + Send>> {
    env_logger::from_env(Env::default().default_filter_or("info, warn, error, debug")).init();

    let (connection, io_handles) = Connection::stdio();
    info!("Initialized stdio transport layer");

    let params = server::initialize(&connection)?;
    info!("JSON-RPC handshake completed");

    server::run(connection, params).await?;
    io_handles.join()?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::client;
    use super::server;
    use lsp_server::Connection;
    use lsp_types::{ClientCapabilities, InitializeParams};
    use std::error::Error;

    #[test]
    fn initialize() -> Result<(), Box<dyn Error + Sync + Send>>
    }
}
{
        // Test with an in-memory connection pair
        let (connection, client) = Connection::memory();

        // Mock set of client parameters. The `root_path` field is deprecated, but
        // still required to construct the params, so we allow deprecated fields here.
        #[allow(deprecated)]
        let init_params = InitializeParams {
            process_id: Some(1),
            root_path: None,
            root_uri: None,
            initialization_options: None,
            capabilities: ClientCapabilities::default(),
            trace: None,
            workspace_folders: None,
            client_info: None,
        };

        client::initialize(&client, &init_params, 0);
        let params = server::initialize(&connection)?;
        assert_eq!(params, init_params);
        Ok(())
identifier_body
main.rs
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */
mod client;
mod error_reporting;
mod lsp;
mod server;

use lsp_server::Connection;
use std::error::Error;

use env_logger::Env;
use log::info;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Sync + Send>> {
    env_logger::from_env(Env::default().default_filter_or("info, warn, error, debug")).init();

    let (connection, io_handles) = Connection::stdio();
    info!("Initialized stdio transport layer");

    let params = server::initialize(&connection)?;
    info!("JSON-RPC handshake completed");

    server::run(connection, params).await?;
    io_handles.join()?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::client;
    use super::server;
    use lsp_server::Connection;
    use lsp_types::{ClientCapabilities, InitializeParams};
    use std::error::Error;

    #[test]
    fn initialize() -> Result<(), Box<dyn Error + Sync + Send>> {
        // Test with an in-memory connection pair
        let (connection, client) = Connection::memory();

        // Mock set of client parameters. The `root_path` field is deprecated, but
        // still required to construct the params, so we allow deprecated fields here.
        #[allow(deprecated)]
        let init_params = InitializeParams {
            process_id: Some(1),
            root_path: None,
            root_uri: None,
            initialization_options: None,
            capabilities: ClientCapabilities::default(),
            trace: None,
            workspace_folders: None,
            client_info: None,
        };

        client::initialize(&client, &init_params, 0);
        let params = server::initialize(&connection)?;
        assert_eq!(params, init_params);
        Ok(())
    }
}
#![warn(clippy::all)]
random_line_split
main.rs
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#![warn(clippy::all)]

mod client;
mod error_reporting;
mod lsp;
mod server;

use lsp_server::Connection;
use std::error::Error;

use env_logger::Env;
use log::info;

#[tokio::main]
async fn
() -> Result<(), Box<dyn Error + Sync + Send>> {
    env_logger::from_env(Env::default().default_filter_or("info, warn, error, debug")).init();

    let (connection, io_handles) = Connection::stdio();
    info!("Initialized stdio transport layer");

    let params = server::initialize(&connection)?;
    info!("JSON-RPC handshake completed");

    server::run(connection, params).await?;
    io_handles.join()?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::client;
    use super::server;
    use lsp_server::Connection;
    use lsp_types::{ClientCapabilities, InitializeParams};
    use std::error::Error;

    #[test]
    fn initialize() -> Result<(), Box<dyn Error + Sync + Send>> {
        // Test with an in-memory connection pair
        let (connection, client) = Connection::memory();

        // Mock set of client parameters. The `root_path` field is deprecated, but
        // still required to construct the params, so we allow deprecated fields here.
        #[allow(deprecated)]
        let init_params = InitializeParams {
            process_id: Some(1),
            root_path: None,
            root_uri: None,
            initialization_options: None,
            capabilities: ClientCapabilities::default(),
            trace: None,
            workspace_folders: None,
            client_info: None,
        };

        client::initialize(&client, &init_params, 0);
        let params = server::initialize(&connection)?;
        assert_eq!(params, init_params);
        Ok(())
    }
}
main
identifier_name
borrowck-move-out-of-tuple-struct-with-dtor.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

struct S(~str);

impl Drop for S {
    fn drop(&mut self) { }
    match S("foo".to_owned()) {
        S(_s) => {}
        //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
    }
}

fn move_in_let() {
    let S(_s) = S("foo".to_owned());
    //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
}

fn move_in_fn_arg(S(_s): S) {
    //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
}

fn main() {}
}

fn move_in_match() {
random_line_split
borrowck-move-out-of-tuple-struct-with-dtor.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

struct S(~str);

impl Drop for S {
    fn
(&mut self) { }
}

fn move_in_match() {
    match S("foo".to_owned()) {
        S(_s) => {}
        //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
    }
}

fn move_in_let() {
    let S(_s) = S("foo".to_owned());
    //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
}

fn move_in_fn_arg(S(_s): S) {
    //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
}

fn main() {}
drop
identifier_name
borrowck-move-out-of-tuple-struct-with-dtor.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

struct S(~str);

impl Drop for S {
    fn drop(&mut self) { }
}

fn move_in_match() {
    match S("foo".to_owned()) {
        S(_s) => {}
        //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
    }
}

fn move_in_let() {
    let S(_s) = S("foo".to_owned());
    //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
}

fn move_in_fn_arg(S(_s): S) {
    //~^ ERROR cannot move out of type `S`, which defines the `Drop` trait
}

fn main()
{}
identifier_body
deactivate.rs
//! `POST /_matrix/client/*/account/deactivate`

pub mod v3 {
    //! `/v3/` ([spec])
    //!
    //! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3accountdeactivate

    use ruma_common::api::ruma_api;

    use crate::{
        account::ThirdPartyIdRemovalStatus,
        uiaa::{AuthData, IncomingAuthData, UiaaResponse},
    };

    ruma_api! {
        metadata: {
            description: "Deactivate the current user's account.",
            method: POST,
            name: "deactivate",
            r0_path: "/_matrix/client/r0/account/deactivate",
            stable_path: "/_matrix/client/v3/account/deactivate",
        #[derive(Default)]
        request: {
            /// Additional authentication information for the user-interactive authentication API.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub auth: Option<AuthData<'a>>,

            /// Identity server from which to unbind the user's third party
            /// identifier.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub id_server: Option<&'a str>,
        }

        response: {
            /// Result of unbind operation.
            pub id_server_unbind_result: ThirdPartyIdRemovalStatus,
        }

        error: UiaaResponse
    }

    impl Request<'_> {
        /// Creates an empty `Request`.
        pub fn new() -> Self {
            Default::default()
        }
    }

    impl Response {
        /// Creates a new `Response` with the given unbind result.
        pub fn new(id_server_unbind_result: ThirdPartyIdRemovalStatus) -> Self {
            Self { id_server_unbind_result }
        }
    }
}
            rate_limited: true,
            authentication: AccessToken,
            added: 1.0,
        }
random_line_split
deactivate.rs
//! `POST /_matrix/client/*/account/deactivate`

pub mod v3 {
    //! `/v3/` ([spec])
    //!
    //! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3accountdeactivate

    use ruma_common::api::ruma_api;

    use crate::{
        account::ThirdPartyIdRemovalStatus,
        uiaa::{AuthData, IncomingAuthData, UiaaResponse},
    };

    ruma_api! {
        metadata: {
            description: "Deactivate the current user's account.",
            method: POST,
            name: "deactivate",
            r0_path: "/_matrix/client/r0/account/deactivate",
            stable_path: "/_matrix/client/v3/account/deactivate",
            rate_limited: true,
            authentication: AccessToken,
            added: 1.0,
        }

        #[derive(Default)]
        request: {
            /// Additional authentication information for the user-interactive authentication API.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub auth: Option<AuthData<'a>>,

            /// Identity server from which to unbind the user's third party
            /// identifier.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub id_server: Option<&'a str>,
        }

        response: {
            /// Result of unbind operation.
            pub id_server_unbind_result: ThirdPartyIdRemovalStatus,
        }

        error: UiaaResponse
    }

    impl Request<'_> {
        /// Creates an empty `Request`.
        pub fn new() -> Self {
            Default::default()
        }
    }

    impl Response {
        /// Creates a new `Response` with the given unbind result.
        pub fn new(id_server_unbind_result: ThirdPartyIdRemovalStatus) -> Self
    }
}
{
            Self { id_server_unbind_result }
        }
identifier_body
deactivate.rs
//! `POST /_matrix/client/*/account/deactivate`

pub mod v3 {
    //! `/v3/` ([spec])
    //!
    //! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3accountdeactivate

    use ruma_common::api::ruma_api;

    use crate::{
        account::ThirdPartyIdRemovalStatus,
        uiaa::{AuthData, IncomingAuthData, UiaaResponse},
    };

    ruma_api! {
        metadata: {
            description: "Deactivate the current user's account.",
            method: POST,
            name: "deactivate",
            r0_path: "/_matrix/client/r0/account/deactivate",
            stable_path: "/_matrix/client/v3/account/deactivate",
            rate_limited: true,
            authentication: AccessToken,
            added: 1.0,
        }

        #[derive(Default)]
        request: {
            /// Additional authentication information for the user-interactive authentication API.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub auth: Option<AuthData<'a>>,

            /// Identity server from which to unbind the user's third party
            /// identifier.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub id_server: Option<&'a str>,
        }

        response: {
            /// Result of unbind operation.
            pub id_server_unbind_result: ThirdPartyIdRemovalStatus,
        }

        error: UiaaResponse
    }

    impl Request<'_> {
        /// Creates an empty `Request`.
        pub fn new() -> Self {
            Default::default()
        }
    }

    impl Response {
        /// Creates a new `Response` with the given unbind result.
        pub fn
(id_server_unbind_result: ThirdPartyIdRemovalStatus) -> Self {
            Self { id_server_unbind_result }
        }
    }
}
new
identifier_name
stmt_parse.rs
use std::path::PathBuf;

use syntax_tree::*;
use parser::lexer::lexer_util::lexemes::*;
use parser::syntax_parser::expr_parse::*;
use parser::syntax_parser::synpar_util::*;
use error::{RollerErr, SynErr, ParseResult};

#[allow(unused_variables)]
pub fn parse_stmt(input: InType) -> ParseResult<Stmt> {
    Err(())
        .or(parse_assign(input))
        .or(parse_fundef(input))
        .or(parse_delete(input))
        .or(parse_clear(input))
        .or(parse_run(input))
        .or(parse_save(input))
        .or(Err(RollerErr::SyntaxError(SynErr::Unimplemented)))
}

fn parse_assign(input: InType) -> ParseResult<Stmt, ()> {
    // if we have an identifier, followed by =, followed by expression
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::Eq, input)) = input.split_first() {
            if let Ok(exp) = parse_expr(input) {
                return Ok(Stmt::Assign(id.clone(), exp));
            }
        }
    }
    Err(())
}

fn parse_fundef(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::LeftParen, input)) = input.split_first() {
            // parse parameters
            let mut mut_input = input.clone();
            let mut param_ids = Vec::new();
            loop {
                match mut_input.split_first() {
                    Some((&Lexeme::Id(ref id), input)) => {
                        // parameter found
                        param_ids.push(id.clone());
                        mut_input = input;
                    },
                    Some((&Lexeme::Comma, input)) => {
                        // ignore commas
                        // commas are optional
                        mut_input = input
                    },
                    Some((&Lexeme::RightParen, input)) => {
                        // found the end of parameter definition
                        mut_input = input;
                        break;
                    },
                    _ => return Err(())
                }
            }
            // check if we have an equal sign and an expression
            if let Some((&Lexeme::Eq, input)) = mut_input.split_first() {
                if let Ok(exp) = parse_expr(input) {
                    return Ok(Stmt::FnDef(id.clone(), RollerFun::new(param_ids, exp) ));
                }
            }
        }
    }
    Err(())
}

fn parse_delete(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Delete), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Delete(id.clone()) );
            }
        }
    }
    Err(())
}

fn parse_clear(input: InType) -> ParseResult<Stmt, ()>
fn parse_run(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Run), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Run(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}

fn parse_save(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Save), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Save(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}
{
    if input.len() == 2 {
        // clear, end
        if let Lexeme::Kw(KwToken::Clear) = input[0] {
            return Ok(Stmt::Clear);
        }
    }
    Err(())
}
identifier_body
stmt_parse.rs
use std::path::PathBuf;

use syntax_tree::*;
use parser::lexer::lexer_util::lexemes::*;
use parser::syntax_parser::expr_parse::*;
use parser::syntax_parser::synpar_util::*;
use error::{RollerErr, SynErr, ParseResult};

#[allow(unused_variables)]
pub fn parse_stmt(input: InType) -> ParseResult<Stmt> {
    Err(())
        .or(parse_assign(input))
        .or(parse_fundef(input))
        .or(parse_delete(input))
        .or(parse_clear(input))
        .or(parse_run(input))
        .or(parse_save(input))
        .or(Err(RollerErr::SyntaxError(SynErr::Unimplemented)))
}

fn parse_assign(input: InType) -> ParseResult<Stmt, ()> {
    // if we have an identifier, followed by =, followed by expression
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::Eq, input)) = input.split_first() {
            if let Ok(exp) = parse_expr(input) {
                return Ok(Stmt::Assign(id.clone(), exp));
            }
        }
    }
    Err(())
}

fn parse_fundef(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::LeftParen, input)) = input.split_first() {
            // parse parameters
            let mut mut_input = input.clone();
            let mut param_ids = Vec::new();
            loop {
                match mut_input.split_first() {
                    Some((&Lexeme::Id(ref id), input)) => {
                        // parameter found
                        param_ids.push(id.clone());
                        mut_input = input;
                    },
                    Some((&Lexeme::Comma, input)) => {
                        // ignore commas
                        // commas are optional
                        mut_input = input
                    },
                    Some((&Lexeme::RightParen, input)) => {
                        // found the end of parameter definition
                        mut_input = input;
                        break;
                    },
                    _ => return Err(())
                }
            }
            // check if we have an equal sign and an expression
            if let Some((&Lexeme::Eq, input)) = mut_input.split_first() {
                if let Ok(exp) = parse_expr(input) {
                    return Ok(Stmt::FnDef(id.clone(), RollerFun::new(param_ids, exp) ));
                }
            }
        }
    }
    Err(())
}

fn parse_delete(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Delete), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Delete(id.clone()) );
            }
        }
    }
    Err(())
}

fn parse_clear(input: InType) -> ParseResult<Stmt, ()> {
    if input.len() == 2 {
        // clear, end
        if let Lexeme::Kw(KwToken::Clear) = input[0] {
            return Ok(Stmt::Clear);
        }
    }
    Err(())
}

fn parse_run(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Run), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Run(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}

fn parse_save(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Save), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Save(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}
random_line_split
stmt_parse.rs
use std::path::PathBuf;

use syntax_tree::*;
use parser::lexer::lexer_util::lexemes::*;
use parser::syntax_parser::expr_parse::*;
use parser::syntax_parser::synpar_util::*;
use error::{RollerErr, SynErr, ParseResult};

#[allow(unused_variables)]
pub fn
(input: InType) -> ParseResult<Stmt> {
    Err(())
        .or(parse_assign(input))
        .or(parse_fundef(input))
        .or(parse_delete(input))
        .or(parse_clear(input))
        .or(parse_run(input))
        .or(parse_save(input))
        .or(Err(RollerErr::SyntaxError(SynErr::Unimplemented)))
}

fn parse_assign(input: InType) -> ParseResult<Stmt, ()> {
    // if we have an identifier, followed by =, followed by expression
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::Eq, input)) = input.split_first() {
            if let Ok(exp) = parse_expr(input) {
                return Ok(Stmt::Assign(id.clone(), exp));
            }
        }
    }
    Err(())
}

fn parse_fundef(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::LeftParen, input)) = input.split_first() {
            // parse parameters
            let mut mut_input = input.clone();
            let mut param_ids = Vec::new();
            loop {
                match mut_input.split_first() {
                    Some((&Lexeme::Id(ref id), input)) => {
                        // parameter found
                        param_ids.push(id.clone());
                        mut_input = input;
                    },
                    Some((&Lexeme::Comma, input)) => {
                        // ignore commas
                        // commas are optional
                        mut_input = input
                    },
                    Some((&Lexeme::RightParen, input)) => {
                        // found the end of parameter definition
                        mut_input = input;
                        break;
                    },
                    _ => return Err(())
                }
            }
            // check if we have an equal sign and an expression
            if let Some((&Lexeme::Eq, input)) = mut_input.split_first() {
                if let Ok(exp) = parse_expr(input) {
                    return Ok(Stmt::FnDef(id.clone(), RollerFun::new(param_ids, exp) ));
                }
            }
        }
    }
    Err(())
}

fn parse_delete(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Delete), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Delete(id.clone()) );
            }
        }
    }
    Err(())
}

fn parse_clear(input: InType) -> ParseResult<Stmt, ()> {
    if input.len() == 2 {
        // clear, end
        if let Lexeme::Kw(KwToken::Clear) = input[0] {
            return Ok(Stmt::Clear);
        }
    }
    Err(())
}

fn parse_run(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Run), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Run(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}

fn parse_save(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Save), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Save(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}
parse_stmt
identifier_name
stmt_parse.rs
use std::path::PathBuf;

use syntax_tree::*;
use parser::lexer::lexer_util::lexemes::*;
use parser::syntax_parser::expr_parse::*;
use parser::syntax_parser::synpar_util::*;
use error::{RollerErr, SynErr, ParseResult};

#[allow(unused_variables)]
pub fn parse_stmt(input: InType) -> ParseResult<Stmt> {
    Err(())
        .or(parse_assign(input))
        .or(parse_fundef(input))
        .or(parse_delete(input))
        .or(parse_clear(input))
        .or(parse_run(input))
        .or(parse_save(input))
        .or(Err(RollerErr::SyntaxError(SynErr::Unimplemented)))
}

fn parse_assign(input: InType) -> ParseResult<Stmt, ()> {
    // if we have an identifier, followed by =, followed by expression
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::Eq, input)) = input.split_first() {
            if let Ok(exp) = parse_expr(input)
        }
    }
    Err(())
}

fn parse_fundef(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
        if let Some((&Lexeme::LeftParen, input)) = input.split_first() {
            // parse parameters
            let mut mut_input = input.clone();
            let mut param_ids = Vec::new();
            loop {
                match mut_input.split_first() {
                    Some((&Lexeme::Id(ref id), input)) => {
                        // parameter found
                        param_ids.push(id.clone());
                        mut_input = input;
                    },
                    Some((&Lexeme::Comma, input)) => {
                        // ignore commas
                        // commas are optional
                        mut_input = input
                    },
                    Some((&Lexeme::RightParen, input)) => {
                        // found the end of parameter definition
                        mut_input = input;
                        break;
                    },
                    _ => return Err(())
                }
            }
            // check if we have an equal sign and an expression
            if let Some((&Lexeme::Eq, input)) = mut_input.split_first() {
                if let Ok(exp) = parse_expr(input) {
                    return Ok(Stmt::FnDef(id.clone(), RollerFun::new(param_ids, exp) ));
                }
            }
        }
    }
    Err(())
}

fn parse_delete(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Delete), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref id), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Delete(id.clone()) );
            }
        }
    }
    Err(())
}

fn parse_clear(input: InType) -> ParseResult<Stmt, ()> {
    if input.len() == 2 {
        // clear, end
        if let Lexeme::Kw(KwToken::Clear) = input[0] {
            return Ok(Stmt::Clear);
        }
    }
    Err(())
}

fn parse_run(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Run), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Run(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}

fn parse_save(input: InType) -> ParseResult<Stmt, ()> {
    if let Some((&Lexeme::Kw(KwToken::Save), input)) = input.split_first() {
        if let Some((&Lexeme::Id(ref s), input)) = input.split_first() {
            if input.get(0) == Some(&Lexeme::End) {
                return Ok(Stmt::Save(PathBuf::from(s) ));
            }
        }
    }
    Err(())
}
{
                return Ok(Stmt::Assign(id.clone(), exp));
            }
conditional_block
headless_window.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

//! A headless window implementation.

use crate::window_trait::WindowPortsMethods;
use glutin;
use euclid::{default::Size2D as UntypedSize2D, Point2D, Rotation3D, Scale, Size2D, UnknownUnit};
use gleam::gl;
use servo::compositing::windowing::{AnimationState, WindowEvent};
use servo::compositing::windowing::{EmbedderCoordinates, WindowMethods};
use servo::servo_config::opts;
use servo::servo_geometry::DeviceIndependentPixel;
use servo::style_traits::DevicePixel;
use servo::webrender_api::units::{DeviceIntRect, DeviceIntSize};
use servo_media::player::context as MediaPlayerCtxt;
use std::cell::Cell;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::cell::RefCell;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::ffi::CString;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::mem;
use std::os::raw::c_void;
use std::ptr;
use std::rc::Rc;

#[cfg(any(target_os = "linux", target_os = "macos"))]
struct HeadlessContext {
    width: u32,
    height: u32,
    context: osmesa_sys::OSMesaContext,
    buffer: RefCell<Vec<u32>>,
}

#[cfg(not(any(target_os = "linux", target_os = "macos")))]
struct HeadlessContext {
    width: u32,
    height: u32,
}

impl HeadlessContext {
    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn new(width: u32, height: u32, share: Option<&HeadlessContext>) -> HeadlessContext {
        let mut attribs = Vec::new();

        attribs.push(osmesa_sys::OSMESA_PROFILE);
        attribs.push(osmesa_sys::OSMESA_CORE_PROFILE);
        attribs.push(osmesa_sys::OSMESA_CONTEXT_MAJOR_VERSION);
        attribs.push(3);
        attribs.push(osmesa_sys::OSMESA_CONTEXT_MINOR_VERSION);
        attribs.push(3);
        attribs.push(0);

        let share = share.map_or(ptr::null_mut(), |share| share.context as *mut _);

        let context =
            unsafe { osmesa_sys::OSMesaCreateContextAttribs(attribs.as_ptr(), share) };

        assert!(!context.is_null());

        HeadlessContext {
            width: width,
            height: height,
            context: context,
            buffer: RefCell::new(vec![0; (width * height) as usize]),
        }
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn new(width: u32, height: u32, _share: Option<&HeadlessContext>) -> HeadlessContext {
        HeadlessContext {
            width: width,
            height: height,
        }
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn get_proc_address(s: &str) -> *const c_void {
        let c_str = CString::new(s).expect("Unable to create CString");
        unsafe { mem::transmute(osmesa_sys::OSMesaGetProcAddress(c_str.as_ptr())) }
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn get_proc_address(_: &str) -> *const c_void
    }
}

pub struct Window {
    context: HeadlessContext,
    animation_state: Cell<AnimationState>,
    fullscreen: Cell<bool>,
    gl: Rc<dyn gl::Gl>,
}

impl Window {
    pub fn new(size: Size2D<u32, DeviceIndependentPixel>) -> Rc<dyn WindowPortsMethods> {
        let context = HeadlessContext::new(size.width, size.height, None);
        let gl = unsafe { gl::GlFns::load_with(|s| HeadlessContext::get_proc_address(s)) };

        // Print some information about the headless renderer that
        // can be useful in diagnosing CI failures on build machines.
        println!("{}", gl.get_string(gl::VENDOR));
        println!("{}", gl.get_string(gl::RENDERER));
        println!("{}", gl.get_string(gl::VERSION));

        let window = Window {
            context,
            gl,
            animation_state: Cell::new(AnimationState::Idle),
            fullscreen: Cell::new(false),
        };

        Rc::new(window)
    }

    fn servo_hidpi_factor(&self) -> Scale<f32, DeviceIndependentPixel, DevicePixel> {
        match opts::get().device_pixels_per_px {
            Some(device_pixels_per_px) => Scale::new(device_pixels_per_px),
            _ => Scale::new(1.0),
        }
    }
}

impl WindowPortsMethods for Window {
    fn get_events(&self) -> Vec<WindowEvent> {
        vec![]
    }

    fn has_events(&self) -> bool {
        false
    }

    fn id(&self) -> glutin::WindowId {
        unsafe { glutin::WindowId::dummy() }
    }

    fn page_height(&self) -> f32 {
        let dpr = self.servo_hidpi_factor();
        self.context.height as f32 * dpr.get()
    }

    fn set_fullscreen(&self, state: bool) {
        self.fullscreen.set(state);
    }

    fn get_fullscreen(&self) -> bool {
        return self.fullscreen.get();
    }

    fn is_animating(&self) -> bool {
        self.animation_state.get() == AnimationState::Animating
    }

    fn winit_event_to_servo_event(&self, _event: glutin::WindowEvent) {
        // Not expecting any winit events.
    }
}

impl WindowMethods for Window {
    fn gl(&self) -> Rc<dyn gl::Gl> {
        self.gl.clone()
    }

    fn get_coordinates(&self) -> EmbedderCoordinates {
        let dpr = self.servo_hidpi_factor();
        let size = (Size2D::new(self.context.width, self.context.height).to_f32() * dpr).to_i32();
        let viewport = DeviceIntRect::new(Point2D::zero(), size);
        let framebuffer = DeviceIntSize::from_untyped(size.to_untyped());
        EmbedderCoordinates {
            viewport,
            framebuffer,
            window: (size, Point2D::zero()),
            screen: size,
            screen_avail: size,
            hidpi_factor: dpr,
        }
    }

    fn present(&self) {}

    fn set_animation_state(&self, state: AnimationState) {
        self.animation_state.set(state);
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn prepare_for_composite(&self) {
        unsafe {
            let mut buffer = self.context.buffer.borrow_mut();
            let ret = osmesa_sys::OSMesaMakeCurrent(
                self.context.context,
                buffer.as_mut_ptr() as *mut _,
                gl::UNSIGNED_BYTE,
                self.context.width as i32,
                self.context.height as i32,
            );
            assert_ne!(ret, 0);
        };
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn prepare_for_composite(&self) {}

    fn get_gl_context(&self) -> MediaPlayerCtxt::GlContext {
        MediaPlayerCtxt::GlContext::Unknown
    }

    fn get_native_display(&self) -> MediaPlayerCtxt::NativeDisplay {
        MediaPlayerCtxt::NativeDisplay::Unknown
    }

    fn get_gl_api(&self) -> MediaPlayerCtxt::GlApi {
        MediaPlayerCtxt::GlApi::None
    }
}

impl webxr::glwindow::GlWindow for Window {
    fn make_current(&self) {}

    fn swap_buffers(&self) {}

    fn size(&self) -> UntypedSize2D<gl::GLsizei> {
        let dpr = self.servo_hidpi_factor().get();
        Size2D::new(
            (self.context.width as f32 * dpr) as gl::GLsizei,
            (self.context.height as f32 * dpr) as gl::GLsizei,
        )
    }

    fn new_window(&self) -> Result<Rc<dyn webxr::glwindow::GlWindow>, ()> {
        let width = self.context.width;
        let height = self.context.height;
        let share = Some(&self.context);
        let context = HeadlessContext::new(width, height, share);
        let gl = self.gl.clone();
        Ok(Rc::new(Window {
            context,
            gl,
            animation_state: Cell::new(AnimationState::Idle),
            fullscreen: Cell::new(false),
        }))
    }

    fn get_rotation(&self) -> Rotation3D<f32, UnknownUnit, UnknownUnit> {
        Rotation3D::identity()
    }
}
{ ptr::null() as *const _ }
identifier_body
headless_window.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

//! A headless window implementation.

use crate::window_trait::WindowPortsMethods;
use glutin;
use euclid::{default::Size2D as UntypedSize2D, Point2D, Rotation3D, Scale, Size2D, UnknownUnit};
use gleam::gl;
use servo::compositing::windowing::{AnimationState, WindowEvent};
use servo::compositing::windowing::{EmbedderCoordinates, WindowMethods};
use servo::servo_config::opts;
use servo::servo_geometry::DeviceIndependentPixel;
use servo::style_traits::DevicePixel;
use servo::webrender_api::units::{DeviceIntRect, DeviceIntSize};
use servo_media::player::context as MediaPlayerCtxt;
use std::cell::Cell;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::cell::RefCell;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::ffi::CString;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::mem;
use std::os::raw::c_void;
use std::ptr;
use std::rc::Rc;

#[cfg(any(target_os = "linux", target_os = "macos"))]
struct HeadlessContext {
    width: u32,
    height: u32,
    context: osmesa_sys::OSMesaContext,
    buffer: RefCell<Vec<u32>>,
}

#[cfg(not(any(target_os = "linux", target_os = "macos")))]
struct HeadlessContext {
    width: u32,
    height: u32,
}

impl HeadlessContext {
    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn new(width: u32, height: u32, share: Option<&HeadlessContext>) -> HeadlessContext {
        let mut attribs = Vec::new();

        attribs.push(osmesa_sys::OSMESA_PROFILE);
        attribs.push(osmesa_sys::OSMESA_CORE_PROFILE);
        attribs.push(osmesa_sys::OSMESA_CONTEXT_MAJOR_VERSION);
        attribs.push(3);
        attribs.push(osmesa_sys::OSMESA_CONTEXT_MINOR_VERSION);
        attribs.push(3);
        attribs.push(0);

        let share = share.map_or(ptr::null_mut(), |share| share.context as *mut _);

        let context =
            unsafe { osmesa_sys::OSMesaCreateContextAttribs(attribs.as_ptr(), share) };

        assert!(!context.is_null());

        HeadlessContext {
            width: width,
            height: height,
            context: context,
            buffer: RefCell::new(vec![0; (width * height) as usize]),
        }
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn new(width: u32, height: u32, _share: Option<&HeadlessContext>) -> HeadlessContext {
        HeadlessContext {
            width: width,
            height: height,
        }
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn
(s: &str) -> *const c_void {
        let c_str = CString::new(s).expect("Unable to create CString");
        unsafe { mem::transmute(osmesa_sys::OSMesaGetProcAddress(c_str.as_ptr())) }
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn get_proc_address(_: &str) -> *const c_void {
        ptr::null() as *const _
    }
}

pub struct Window {
    context: HeadlessContext,
    animation_state: Cell<AnimationState>,
    fullscreen: Cell<bool>,
    gl: Rc<dyn gl::Gl>,
}

impl Window {
    pub fn new(size: Size2D<u32, DeviceIndependentPixel>) -> Rc<dyn WindowPortsMethods> {
        let context = HeadlessContext::new(size.width, size.height, None);
        let gl = unsafe { gl::GlFns::load_with(|s| HeadlessContext::get_proc_address(s)) };

        // Print some information about the headless renderer that
        // can be useful in diagnosing CI failures on build machines.
        println!("{}", gl.get_string(gl::VENDOR));
        println!("{}", gl.get_string(gl::RENDERER));
        println!("{}", gl.get_string(gl::VERSION));

        let window = Window {
            context,
            gl,
            animation_state: Cell::new(AnimationState::Idle),
            fullscreen: Cell::new(false),
        };

        Rc::new(window)
    }

    fn servo_hidpi_factor(&self) -> Scale<f32, DeviceIndependentPixel, DevicePixel> {
        match opts::get().device_pixels_per_px {
            Some(device_pixels_per_px) => Scale::new(device_pixels_per_px),
            _ => Scale::new(1.0),
        }
    }
}

impl WindowPortsMethods for Window {
    fn get_events(&self) -> Vec<WindowEvent> {
        vec![]
    }

    fn has_events(&self) -> bool {
        false
    }

    fn id(&self) -> glutin::WindowId {
        unsafe { glutin::WindowId::dummy() }
    }

    fn page_height(&self) -> f32 {
        let dpr = self.servo_hidpi_factor();
        self.context.height as f32 * dpr.get()
    }

    fn set_fullscreen(&self, state: bool) {
        self.fullscreen.set(state);
    }

    fn get_fullscreen(&self) -> bool {
        return self.fullscreen.get();
    }

    fn is_animating(&self) -> bool {
        self.animation_state.get() == AnimationState::Animating
    }

    fn winit_event_to_servo_event(&self, _event: glutin::WindowEvent) {
        // Not expecting any winit events.
    }
}

impl WindowMethods for Window {
    fn gl(&self) -> Rc<dyn gl::Gl> {
        self.gl.clone()
    }

    fn get_coordinates(&self) -> EmbedderCoordinates {
        let dpr = self.servo_hidpi_factor();
        let size = (Size2D::new(self.context.width, self.context.height).to_f32() * dpr).to_i32();
        let viewport = DeviceIntRect::new(Point2D::zero(), size);
        let framebuffer = DeviceIntSize::from_untyped(size.to_untyped());
        EmbedderCoordinates {
            viewport,
            framebuffer,
            window: (size, Point2D::zero()),
            screen: size,
            screen_avail: size,
            hidpi_factor: dpr,
        }
    }

    fn present(&self) {}

    fn set_animation_state(&self, state: AnimationState) {
        self.animation_state.set(state);
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn prepare_for_composite(&self) {
        unsafe {
            let mut buffer = self.context.buffer.borrow_mut();
            let ret = osmesa_sys::OSMesaMakeCurrent(
                self.context.context,
                buffer.as_mut_ptr() as *mut _,
                gl::UNSIGNED_BYTE,
                self.context.width as i32,
                self.context.height as i32,
            );
            assert_ne!(ret, 0);
        };
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn prepare_for_composite(&self) {}

    fn get_gl_context(&self) -> MediaPlayerCtxt::GlContext {
        MediaPlayerCtxt::GlContext::Unknown
    }

    fn get_native_display(&self) -> MediaPlayerCtxt::NativeDisplay {
        MediaPlayerCtxt::NativeDisplay::Unknown
    }

    fn get_gl_api(&self) -> MediaPlayerCtxt::GlApi {
        MediaPlayerCtxt::GlApi::None
    }
}

impl webxr::glwindow::GlWindow for Window {
    fn make_current(&self) {}

    fn swap_buffers(&self) {}

    fn size(&self) -> UntypedSize2D<gl::GLsizei> {
        let dpr = self.servo_hidpi_factor().get();
        Size2D::new(
            (self.context.width as f32 * dpr) as gl::GLsizei,
            (self.context.height as f32 * dpr) as gl::GLsizei,
        )
    }

    fn new_window(&self) -> Result<Rc<dyn webxr::glwindow::GlWindow>, ()> {
        let width = self.context.width;
        let height = self.context.height;
        let share = Some(&self.context);
        let context = HeadlessContext::new(width, height, share);
        let gl = self.gl.clone();
        Ok(Rc::new(Window {
            context,
            gl,
            animation_state: Cell::new(AnimationState::Idle),
            fullscreen: Cell::new(false),
        }))
    }

    fn get_rotation(&self) -> Rotation3D<f32, UnknownUnit, UnknownUnit> {
        Rotation3D::identity()
    }
}
get_proc_address
identifier_name
headless_window.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

//! A headless window implementation.

use crate::window_trait::WindowPortsMethods;
use glutin;
use euclid::{default::Size2D as UntypedSize2D, Point2D, Rotation3D, Scale, Size2D, UnknownUnit};
use gleam::gl;
use servo::compositing::windowing::{AnimationState, WindowEvent};
use servo::compositing::windowing::{EmbedderCoordinates, WindowMethods};
use servo::servo_config::opts;
use servo::servo_geometry::DeviceIndependentPixel;
use servo::style_traits::DevicePixel;
use servo::webrender_api::units::{DeviceIntRect, DeviceIntSize};
use servo_media::player::context as MediaPlayerCtxt;
use std::cell::Cell;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::cell::RefCell;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::ffi::CString;
#[cfg(any(target_os = "linux", target_os = "macos"))]
use std::mem;
use std::os::raw::c_void;
use std::ptr;
use std::rc::Rc;

#[cfg(any(target_os = "linux", target_os = "macos"))]
struct HeadlessContext {
    width: u32,
    height: u32,
    context: osmesa_sys::OSMesaContext,
    buffer: RefCell<Vec<u32>>,
}

#[cfg(not(any(target_os = "linux", target_os = "macos")))]
struct HeadlessContext {
    width: u32,
    height: u32,
}

impl HeadlessContext {
    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn new(width: u32, height: u32, share: Option<&HeadlessContext>) -> HeadlessContext {
        let mut attribs = Vec::new();

        attribs.push(osmesa_sys::OSMESA_PROFILE);
        attribs.push(osmesa_sys::OSMESA_CORE_PROFILE);
        attribs.push(osmesa_sys::OSMESA_CONTEXT_MAJOR_VERSION);
        attribs.push(3);
        attribs.push(osmesa_sys::OSMESA_CONTEXT_MINOR_VERSION);
        attribs.push(3);
        attribs.push(0);

        let share = share.map_or(ptr::null_mut(), |share| share.context as *mut _);

        let context =
            unsafe { osmesa_sys::OSMesaCreateContextAttribs(attribs.as_ptr(), share) };

        assert!(!context.is_null());

        HeadlessContext {
            width: width,
            height: height,
            context: context,
            buffer: RefCell::new(vec![0; (width * height) as usize]),
        }
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn new(width: u32, height: u32, _share: Option<&HeadlessContext>) -> HeadlessContext {
        HeadlessContext {
            width: width,
            height: height,
        }
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn get_proc_address(s: &str) -> *const c_void {
        let c_str = CString::new(s).expect("Unable to create CString");
        unsafe { mem::transmute(osmesa_sys::OSMesaGetProcAddress(c_str.as_ptr())) }
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn get_proc_address(_: &str) -> *const c_void {
        ptr::null() as *const _
    }
}

pub struct Window {
    context: HeadlessContext,
    animation_state: Cell<AnimationState>,
    fullscreen: Cell<bool>,
    gl: Rc<dyn gl::Gl>,
}

impl Window {
    pub fn new(size: Size2D<u32, DeviceIndependentPixel>) -> Rc<dyn WindowPortsMethods> {
        let context = HeadlessContext::new(size.width, size.height, None);
        let gl = unsafe { gl::GlFns::load_with(|s| HeadlessContext::get_proc_address(s)) };

        // Print some information about the headless renderer that
        // can be useful in diagnosing CI failures on build machines.
        println!("{}", gl.get_string(gl::VENDOR));
        println!("{}", gl.get_string(gl::RENDERER));
        println!("{}", gl.get_string(gl::VERSION));

        let window = Window {
            context,
            gl,
            animation_state: Cell::new(AnimationState::Idle),
            fullscreen: Cell::new(false),
        };
    fn servo_hidpi_factor(&self) -> Scale<f32, DeviceIndependentPixel, DevicePixel> {
        match opts::get().device_pixels_per_px {
            Some(device_pixels_per_px) => Scale::new(device_pixels_per_px),
            _ => Scale::new(1.0),
        }
    }
}

impl WindowPortsMethods for Window {
    fn get_events(&self) -> Vec<WindowEvent> {
        vec![]
    }

    fn has_events(&self) -> bool {
        false
    }

    fn id(&self) -> glutin::WindowId {
        unsafe { glutin::WindowId::dummy() }
    }

    fn page_height(&self) -> f32 {
        let dpr = self.servo_hidpi_factor();
        self.context.height as f32 * dpr.get()
    }

    fn set_fullscreen(&self, state: bool) {
        self.fullscreen.set(state);
    }

    fn get_fullscreen(&self) -> bool {
        return self.fullscreen.get();
    }

    fn is_animating(&self) -> bool {
        self.animation_state.get() == AnimationState::Animating
    }

    fn winit_event_to_servo_event(&self, _event: glutin::WindowEvent) {
        // Not expecting any winit events.
    }
}

impl WindowMethods for Window {
    fn gl(&self) -> Rc<dyn gl::Gl> {
        self.gl.clone()
    }

    fn get_coordinates(&self) -> EmbedderCoordinates {
        let dpr = self.servo_hidpi_factor();
        let size = (Size2D::new(self.context.width, self.context.height).to_f32() * dpr).to_i32();
        let viewport = DeviceIntRect::new(Point2D::zero(), size);
        let framebuffer = DeviceIntSize::from_untyped(size.to_untyped());
        EmbedderCoordinates {
            viewport,
            framebuffer,
            window: (size, Point2D::zero()),
            screen: size,
            screen_avail: size,
            hidpi_factor: dpr,
        }
    }

    fn present(&self) {}

    fn set_animation_state(&self, state: AnimationState) {
        self.animation_state.set(state);
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn prepare_for_composite(&self) {
        unsafe {
            let mut buffer = self.context.buffer.borrow_mut();
            let ret = osmesa_sys::OSMesaMakeCurrent(
                self.context.context,
                buffer.as_mut_ptr() as *mut _,
                gl::UNSIGNED_BYTE,
                self.context.width as i32,
                self.context.height as i32,
            );
            assert_ne!(ret, 0);
        };
    }

    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    fn prepare_for_composite(&self) {}

    fn get_gl_context(&self) -> MediaPlayerCtxt::GlContext {
        MediaPlayerCtxt::GlContext::Unknown
    }

    fn get_native_display(&self) -> MediaPlayerCtxt::NativeDisplay {
        MediaPlayerCtxt::NativeDisplay::Unknown
    }

    fn get_gl_api(&self) -> MediaPlayerCtxt::GlApi {
        MediaPlayerCtxt::GlApi::None
    }
}

impl webxr::glwindow::GlWindow for Window {
    fn make_current(&self) {}

    fn swap_buffers(&self) {}

    fn size(&self) -> UntypedSize2D<gl::GLsizei> {
        let dpr = self.servo_hidpi_factor().get();
        Size2D::new(
            (self.context.width as f32 * dpr) as gl::GLsizei,
            (self.context.height as f32 * dpr) as gl::GLsizei,
        )
    }

    fn new_window(&self) -> Result<Rc<dyn webxr::glwindow::GlWindow>, ()> {
        let width = self.context.width;
        let height = self.context.height;
        let share = Some(&self.context);
        let context = HeadlessContext::new(width, height, share);
        let gl = self.gl.clone();
        Ok(Rc::new(Window {
            context,
            gl,
            animation_state: Cell::new(AnimationState::Idle),
            fullscreen: Cell::new(false),
        }))
    }

    fn get_rotation(&self) -> Rotation3D<f32, UnknownUnit, UnknownUnit> {
        Rotation3D::identity()
    }
}
        Rc::new(window)
    }
random_line_split